Merge branch 'master' of github.com:ClickHouse/ClickHouse into parallel-parsing-input-format

commit 6a55e018e4
.gitmodules (vendored)
@@ -183,7 +183,7 @@
 	url = https://github.com/kthohr/stats.git
 [submodule "contrib/krb5"]
 	path = contrib/krb5
-	url = https://github.com/krb5/krb5
+	url = https://github.com/ClickHouse-Extras/krb5
 [submodule "contrib/cyrus-sasl"]
 	path = contrib/cyrus-sasl
 	url = https://github.com/cyrusimap/cyrus-sasl
base/common/CMakeLists.txt
@@ -6,6 +6,7 @@ set (SRCS
     demangle.cpp
     getFQDNOrHostName.cpp
     getMemoryAmount.cpp
+    getPageSize.cpp
     getThreadId.cpp
     JSON.cpp
     LineReader.cpp
base/common/getMemoryAmount.cpp
@@ -1,5 +1,6 @@
 #include <stdexcept>
 #include "common/getMemoryAmount.h"
+#include "common/getPageSize.h"

 #include <unistd.h>
 #include <sys/types.h>
@@ -18,7 +19,7 @@ uint64_t getMemoryAmountOrZero()
     if (num_pages <= 0)
         return 0;

-    int64_t page_size = sysconf(_SC_PAGESIZE);
+    int64_t page_size = getPageSize();
     if (page_size <= 0)
         return 0;
base/common/getPageSize.cpp (new file)
@@ -0,0 +1,8 @@
+#include "common/getPageSize.h"
+
+#include <unistd.h>
+
+Int64 getPageSize()
+{
+    return sysconf(_SC_PAGESIZE);
+}

base/common/getPageSize.h (new file)
@@ -0,0 +1,6 @@
+#pragma once
+
+#include "common/types.h"
+
+/// Get memory page size
+Int64 getPageSize();
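Taken together, the new helper lets `getMemoryAmountOrZero()` express total RAM as pages × page size. A minimal standalone sketch of that arithmetic, assuming a POSIX system where `_SC_PHYS_PAGES` is available (the real implementation lives in `base/common/getMemoryAmount.cpp` and may differ in details):

``` cpp
#include <unistd.h>
#include <cstdint>

// Same contract as the new base/common helper: page size in bytes, or <= 0 on failure.
static int64_t getPageSizeSketch()
{
    return sysconf(_SC_PAGESIZE);
}

// Sketch of the computation after this change: total memory = pages * page size.
// _SC_PHYS_PAGES is an assumption here, not something shown in the diff.
uint64_t getMemoryAmountOrZeroSketch()
{
    int64_t num_pages = sysconf(_SC_PHYS_PAGES);
    if (num_pages <= 0)
        return 0;

    int64_t page_size = getPageSizeSketch();
    if (page_size <= 0)
        return 0;

    return static_cast<uint64_t>(num_pages) * static_cast<uint64_t>(page_size);
}
```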
base/common/ya.make
@@ -47,6 +47,7 @@ SRCS(
     errnoToString.cpp
     getFQDNOrHostName.cpp
     getMemoryAmount.cpp
+    getPageSize.cpp
    getResource.cpp
     getThreadId.cpp
     mremap.cpp
cmake/sanitize.cmake
@@ -41,9 +41,10 @@ if (SANITIZE)
     if (COMPILER_CLANG)
         set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt")
     else()
-        message (WARNING "TSAN suppressions was not passed to the compiler (since the compiler is not clang)")
-        message (WARNING "Use the following command to pass them manually:")
-        message (WARNING " export TSAN_OPTIONS=\"$TSAN_OPTIONS suppressions=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt\"")
+        set (MESSAGE "TSAN suppressions was not passed to the compiler (since the compiler is not clang)\n")
+        set (MESSAGE "${MESSAGE}Use the following command to pass them manually:\n")
+        set (MESSAGE "${MESSAGE} export TSAN_OPTIONS=\"$TSAN_OPTIONS suppressions=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt\"")
+        message (WARNING "${MESSAGE}")
     endif()
@@ -57,8 +58,18 @@ if (SANITIZE)
     endif ()

 elseif (SANITIZE STREQUAL "undefined")
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
-    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
+    set (UBSAN_FLAGS "-fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero")
+    if (COMPILER_CLANG)
+        set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
+    else()
+        set (MESSAGE "UBSAN suppressions was not passed to the compiler (since the compiler is not clang)\n")
+        set (MESSAGE "${MESSAGE}Use the following command to pass them manually:\n")
+        set (MESSAGE "${MESSAGE} export UBSAN_OPTIONS=\"$UBSAN_OPTIONS suppressions=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt\"")
+        message (WARNING "${MESSAGE}")
+    endif()
+
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
     if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
     endif()
contrib/boost (vendored)
@@ -1 +1 @@
-Subproject commit a7ceabe4747ecc3309dd3dcd9de4b29660dfd298
+Subproject commit 0b98b443aa7bb77d65efd7b23b3b8c8a0ab5f1f3

contrib/krb5 (vendored)
@@ -1 +1 @@
-Subproject commit 99f7ad2831a01f264c07eed42a0a3a9336b86184
+Subproject commit 90ff6f4f8c695d6bf1aaba78a9b8942be92141c2

contrib/libgsasl (vendored)
@@ -1 +1 @@
-Subproject commit 140fb58250588c8323285b75fcf127c4adc33dfa
+Subproject commit 383ee28e82f69fa16ed43b48bd9c8ee5b313ab84

contrib/libhdfs3 (vendored)
@@ -1 +1 @@
-Subproject commit 30552ac527f2c14070d834e171493b2e7f662375
+Subproject commit 095b9d48b400abb72d967cb0539af13b1e3d90cf
contrib/libhdfs3-cmake/CMakeLists.txt
@@ -17,7 +17,12 @@ if (NOT USE_INTERNAL_PROTOBUF_LIBRARY AND PROTOBUF_OLD_ABI_COMPAT)
     endif ()
 endif()

-set(WITH_KERBEROS false)
+if (${ENABLE_LIBRARIES} AND ${ENABLE_KRB5})
+    SET(WITH_KERBEROS 1)
+else()
+    SET(WITH_KERBEROS 0)
+endif()

 # project and source dir
 set(HDFS3_ROOT_DIR ${ClickHouse_SOURCE_DIR}/contrib/libhdfs3)
 set(HDFS3_SOURCE_DIR ${HDFS3_ROOT_DIR}/src)
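The net effect is that Kerberos support in libhdfs3 now follows the top-level build switches instead of being hard-coded off. A hypothetical configure invocation using the flag names from the hunk above (defaults and exact behavior may vary):

``` bash
# Hypothetical: enable libraries and the bundled krb5; WITH_KERBEROS becomes 1.
cmake .. -DENABLE_LIBRARIES=1 -DENABLE_KRB5=1

# Hypothetical: opt out; WITH_KERBEROS becomes 0 and ${KRB5_LIBRARY} is not linked.
cmake .. -DENABLE_KRB5=0
```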
@@ -28,11 +33,6 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH})
 include(Platform)
 include(Options)

-# prefer shared libraries
-if (WITH_KERBEROS)
-    find_package(KERBEROS REQUIRED)
-endif()
-
 # source
 set(PROTO_FILES
     #${HDFS3_SOURCE_DIR}/proto/encryption.proto
@@ -207,14 +207,11 @@ target_include_directories(hdfs3 PRIVATE ${HDFS3_COMMON_DIR})
 target_include_directories(hdfs3 PRIVATE ${CMAKE_CURRENT_BINARY_DIR})

 target_include_directories(hdfs3 PRIVATE ${LIBGSASL_INCLUDE_DIR})
-if (WITH_KERBEROS)
-    target_include_directories(hdfs3 PRIVATE ${KERBEROS_INCLUDE_DIRS})
-endif()
 target_include_directories(hdfs3 PRIVATE ${LIBXML2_INCLUDE_DIR})

 target_link_libraries(hdfs3 PRIVATE ${LIBGSASL_LIBRARY})
 if (WITH_KERBEROS)
-    target_link_libraries(hdfs3 PRIVATE ${KERBEROS_LIBRARIES})
+    target_link_libraries(hdfs3 PRIVATE ${KRB5_LIBRARY})
 endif()
 target_link_libraries(hdfs3 PRIVATE ${LIBXML2_LIBRARIES})
contrib/libunwind (vendored)
@@ -1 +1 @@
-Subproject commit 51b84d9b6d2548f1cbdcafe622d5a753853b6149
+Subproject commit 8fe25d7dc70f2a4ea38c3e5a33fa9d4199b67a5a
debian/control (vendored)
@@ -40,7 +40,7 @@ Description: Common files for ClickHouse
 Package: clickhouse-server
 Architecture: all
 Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser
-Recommends: libcap2-bin
+Recommends: libcap2-bin, krb5-user
 Replaces: clickhouse-server-common, clickhouse-server-base
 Provides: clickhouse-server-common
 Description: Server binary for ClickHouse
docker/images.json
@@ -58,8 +58,7 @@
     "docker/test/stateless": {
         "name": "yandex/clickhouse-stateless-test",
         "dependent": [
-            "docker/test/stateful",
-            "docker/test/stateful_with_coverage"
+            "docker/test/stateful"
         ]
     },
     "docker/test/stateless_pytest": {
@@ -68,7 +67,9 @@
     },
     "docker/test/stateless_with_coverage": {
         "name": "yandex/clickhouse-stateless-test-with-coverage",
-        "dependent": []
+        "dependent": [
+            "docker/test/stateful_with_coverage"
+        ]
     },
     "docker/test/unit": {
         "name": "yandex/clickhouse-unit-test",
@@ -157,5 +158,9 @@
         "name": "yandex/clickhouse-stateless-unbundled-test",
         "dependent": [
         ]
+    },
+    "docker/test/integration/kerberized_hadoop": {
+        "name": "yandex/clickhouse-kerberized-hadoop",
+        "dependent": []
     }
 }
docker/packager/binary/build.sh
@@ -31,6 +31,7 @@ find . -name '*.so.*' -print -exec mv '{}' /output \;
 if [ "performance" == "$COMBINED_OUTPUT" ]
 then
     cp -r ../tests/performance /output
+    cp -r ../tests/config/top_level_domains /
     cp -r ../docker/test/performance-comparison/config /output ||:
     rm /output/unit_tests_dbms ||:
     rm /output/clickhouse-odbc-bridge ||:
docker/packager/packager
@@ -148,6 +148,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type

     if split_binary:
         cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1')
+        # We can't always build utils because it requires too much space, but
+        # we have to build them at least in some way in CI. The split build is
+        # probably the least heavy disk-wise.
+        cmake_flags.append('-DENABLE_UTILS=1')

     if clang_tidy:
         cmake_flags.append('-DENABLE_CLANG_TIDY=1')
docker/test/base/Dockerfile
@@ -1,5 +1,5 @@
 # docker build -t yandex/clickhouse-test-base .
-FROM ubuntu:19.10
+FROM ubuntu:20.04

 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
docker/test/integration/kerberized_hadoop/Dockerfile (new file)
@@ -0,0 +1,18 @@
+# docker build -t yandex/clickhouse-kerberized-hadoop .
+
+FROM sequenceiq/hadoop-docker:2.7.0
+RUN sed -i -e 's/^\#baseurl/baseurl/' /etc/yum.repos.d/CentOS-Base.repo
+RUN sed -i -e 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/CentOS-Base.repo
+RUN sed -i -e 's#http://mirror.centos.org/#http://vault.centos.org/#' /etc/yum.repos.d/CentOS-Base.repo
+RUN yum clean all && \
+    rpm --rebuilddb && \
+    yum -y update && \
+    yum -y install yum-plugin-ovl && \
+    yum --quiet -y install krb5-workstation.x86_64
+RUN cd /tmp && \
+    curl http://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz -o commons-daemon-1.0.15-src.tar.gz && \
+    tar xzf commons-daemon-1.0.15-src.tar.gz && \
+    cd commons-daemon-1.0.15-src/src/native/unix && \
+    ./configure && \
+    make && \
+    cp ./jsvc /usr/local/hadoop/sbin
docker/test/integration/runner/Dockerfile
@@ -29,6 +29,8 @@ RUN apt-get update \
         libcurl4-openssl-dev \
         gdb \
         software-properties-common \
+        libkrb5-dev \
+        krb5-user \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
@@ -75,7 +77,8 @@ RUN python3 -m pip install \
     pytest-timeout \
     redis \
     tzlocal \
-    urllib3
+    urllib3 \
+    requests-kerberos

 COPY modprobe.sh /usr/local/bin/modprobe
 COPY dockerd-entrypoint.sh /usr/local/bin/
docker/test/integration/runner/compose/docker_compose_hdfs.yml
@@ -2,6 +2,7 @@ version: '2.3'
 services:
     hdfs1:
         image: sequenceiq/hadoop-docker:2.7.0
+        hostname: hdfs1
         restart: always
         ports:
             - 50075:50075
docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml (new file)
@@ -0,0 +1,29 @@
+version: '2.3'
+
+services:
+    kerberizedhdfs1:
+        cap_add:
+            - DAC_READ_SEARCH
+        image: yandex/clickhouse-kerberized-hadoop:16621
+        hostname: kerberizedhdfs1
+        restart: always
+        volumes:
+            - ${KERBERIZED_HDFS_DIR}/../../hdfs_configs/bootstrap.sh:/etc/bootstrap.sh:ro
+            - ${KERBERIZED_HDFS_DIR}/secrets:/usr/local/hadoop/etc/hadoop/conf
+            - ${KERBERIZED_HDFS_DIR}/secrets/krb_long.conf:/etc/krb5.conf:ro
+        ports:
+            - 1006:1006
+            - 50070:50070
+            - 9000:9000
+        depends_on:
+            - hdfskerberos
+        entrypoint: /etc/bootstrap.sh -d
+
+    hdfskerberos:
+        image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG}
+        hostname: hdfskerberos
+        volumes:
+            - ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab
+            - ${KERBERIZED_HDFS_DIR}/../../kerberos_image_config.sh:/config.sh
+            - /dev/urandom:/dev/random
+        ports: [88, 749]
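One plausible way to bring the pair up locally, hedged: the compose file name and the variable values below are assumptions, though the variables themselves are exactly the ones referenced in the YAML above.

``` bash
# Both variables are consumed by the compose file above.
export KERBERIZED_HDFS_DIR=$(pwd)
export DOCKER_KERBEROS_KDC_TAG=latest

# File name is an assumption; point -f at wherever the YAML above lives.
docker-compose -f docker_compose_kerberized_hdfs.yml up -d
```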
docker/test/stateful_with_coverage/Dockerfile
@@ -1,8 +1,6 @@
 # docker build -t yandex/clickhouse-stateful-test-with-coverage .
 FROM yandex/clickhouse-stateless-test-with-coverage

-RUN echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" >> /etc/apt/sources.list
-
 RUN apt-get update -y \
     && env DEBIAN_FRONTEND=noninteractive \
         apt-get install --yes --no-install-recommends \
docs/en/engines/table-engines/integrations/hdfs.md
@@ -108,6 +108,95 @@ Create table with files named `file000`, `file001`, … , `file999`:
 ``` sql
 CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
 ```

+## Configuration {#configuration}
+
+Similar to GraphiteMergeTree, the HDFS engine supports extended configuration using the ClickHouse config file. There are two configuration keys that you can use: global (`hdfs`) and user-level (`hdfs_*`). The global configuration is applied first, and then the user-level configuration is applied (if it exists).
+
+``` xml
+<!-- Global configuration options for HDFS engine type -->
+<hdfs>
+  <hadoop_kerberos_keytab>/tmp/keytab/clickhouse.keytab</hadoop_kerberos_keytab>
+  <hadoop_kerberos_principal>clickuser@TEST.CLICKHOUSE.TECH</hadoop_kerberos_principal>
+  <hadoop_security_authentication>kerberos</hadoop_security_authentication>
+</hdfs>
+
+<!-- Configuration specific for user "root" -->
+<hdfs_root>
+  <hadoop_kerberos_principal>root@TEST.CLICKHOUSE.TECH</hadoop_kerberos_principal>
+</hdfs_root>
+```
+
+### List of possible configuration options with default values
+#### Supported by libhdfs3
+
+| **parameter** | **default value** |
+|---|---|
+| rpc\_client\_connect\_tcpnodelay | true |
+| dfs\_client\_read\_shortcircuit | true |
+| output\_replace-datanode-on-failure | true |
+| input\_notretry-another-node | false |
+| input\_localread\_mappedfile | true |
+| dfs\_client\_use\_legacy\_blockreader\_local | false |
+| rpc\_client\_ping\_interval | 10 * 1000 |
+| rpc\_client\_connect\_timeout | 600 * 1000 |
+| rpc\_client\_read\_timeout | 3600 * 1000 |
+| rpc\_client\_write\_timeout | 3600 * 1000 |
+| rpc\_client\_socekt\_linger\_timeout | -1 |
+| rpc\_client\_connect\_retry | 10 |
+| rpc\_client\_timeout | 3600 * 1000 |
+| dfs\_default\_replica | 3 |
+| input\_connect\_timeout | 600 * 1000 |
+| input\_read\_timeout | 3600 * 1000 |
+| input\_write\_timeout | 3600 * 1000 |
+| input\_localread\_default\_buffersize | 1 * 1024 * 1024 |
+| dfs\_prefetchsize | 10 |
+| input\_read\_getblockinfo\_retry | 3 |
+| input\_localread\_blockinfo\_cachesize | 1000 |
+| input\_read\_max\_retry | 60 |
+| output\_default\_chunksize | 512 |
+| output\_default\_packetsize | 64 * 1024 |
+| output\_default\_write\_retry | 10 |
+| output\_connect\_timeout | 600 * 1000 |
+| output\_read\_timeout | 3600 * 1000 |
+| output\_write\_timeout | 3600 * 1000 |
+| output\_close\_timeout | 3600 * 1000 |
+| output\_packetpool\_size | 1024 |
+| output\_heeartbeat\_interval | 10 * 1000 |
+| dfs\_client\_failover\_max\_attempts | 15 |
+| dfs\_client\_read\_shortcircuit\_streams\_cache\_size | 256 |
+| dfs\_client\_socketcache\_expiryMsec | 3000 |
+| dfs\_client\_socketcache\_capacity | 16 |
+| dfs\_default\_blocksize | 64 * 1024 * 1024 |
+| dfs\_default\_uri | "hdfs://localhost:9000" |
+| hadoop\_security\_authentication | "simple" |
+| hadoop\_security\_kerberos\_ticket\_cache\_path | "" |
+| dfs\_client\_log\_severity | "INFO" |
+| dfs\_domain\_socket\_path | "" |
+
+[HDFS Configuration Reference](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) might explain some parameters.
+
+#### ClickHouse extras {#clickhouse-extras}
+
+| **parameter** | **default value** |
+|---|---|
+| hadoop\_kerberos\_keytab | "" |
+| hadoop\_kerberos\_principal | "" |
+| hadoop\_kerberos\_kinit\_command | kinit |
+
+#### Limitations {#limitations}
+
+* hadoop\_security\_kerberos\_ticket\_cache\_path can be global only, not user specific
+
+## Kerberos support {#kerberos-support}
+
+If the hadoop\_security\_authentication parameter has the value 'kerberos', ClickHouse authenticates via the Kerberos facility.
+The parameters [here](#clickhouse-extras) and hadoop\_security\_kerberos\_ticket\_cache\_path may be of help.
+Note that due to libhdfs3 limitations only the old-fashioned approach is supported:
+datanode communications are not secured by SASL (HADOOP\_SECURE\_DN\_USER is a reliable indicator of such a
+security approach). Use tests/integration/test\_storage\_kerberized\_hdfs/hdfs_configs/bootstrap.sh for reference.
+
+If hadoop\_kerberos\_keytab, hadoop\_kerberos\_principal or hadoop\_kerberos\_kinit\_command is specified, kinit will be invoked. hadoop\_kerberos\_keytab and hadoop\_kerberos\_principal are mandatory in this case; the kinit tool and krb5 configuration files are required.
+
 ## Virtual Columns {#virtual-columns}
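As a sketch of how the pieces above combine: a table pointed at the kerberized namenode from the compose file could look like the following. The host name matches the compose service, but the HDFS path and data are purely illustrative; the keytab and principal are taken from the `<hdfs>` configuration example.

``` sql
CREATE TABLE kerb_table (name String, value UInt32)
    ENGINE = HDFS('hdfs://kerberizedhdfs1:9000/kerb_dir/file', 'TSV');

INSERT INTO kerb_table VALUES ('one', 1);
SELECT * FROM kerb_table;
```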
docs/en/interfaces/cli.md
@@ -57,7 +57,7 @@ The command line is based on ‘replxx’ (similar to ‘readline’). In other…

 By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, or by specifying `\G` at the end of the query, using the `--format` or `--vertical` argument in the command line, or using the client configuration file.

-To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
+To exit the client, press Ctrl+D, or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”

 When processing a query, the client shows:
docs/en/introduction/performance.md
@@ -25,6 +25,6 @@ Under the same conditions, ClickHouse can handle several hundred queries per sec…

 ## Performance When Inserting Data {#performance-when-inserting-data}

-We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting to a MergeTree table from a tab-separated dump, the insertion speed can be from 50 to 200 MB/s. If the inserted rows are around 1 Kb in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance can be higher in rows per second (on Banner System data -`>` 500,000 rows per second; on Graphite data -`>` 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, which scales linearly.
+We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting to a MergeTree table from a tab-separated dump, the insertion speed can be from 50 to 200 MB/s. If the inserted rows are around 1 KB in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance can be higher in rows per second (on Banner System data -`>` 500,000 rows per second; on Graphite data -`>` 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, which scales linearly.

 {## [Original article](https://clickhouse.tech/docs/en/introduction/performance/) ##}
docs/en/operations/system-tables/distribution_queue.md
@@ -39,7 +39,7 @@ data_compressed_bytes: 499
 last_exception:
 ```

-**See also**
+**See Also**

 - [Distributed table engine](../../engines/table-engines/special/distributed.md)
docs/en/operations/system-tables/replication_queue.md (new file)
@@ -0,0 +1,81 @@
+# system.replication_queue {#system_tables-replication_queue}
+
+Contains information about tasks from replication queues stored in ZooKeeper for tables in the `ReplicatedMergeTree` family.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table.
+
+- `replica_name` ([String](../../sql-reference/data-types/string.md)) — Replica name in ZooKeeper. Different replicas of the same table have different names.
+
+- `position` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Position of the task in the queue.
+
+- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
+
+- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
+
+- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
+
+- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.
+
+- `source_replica` ([String](../../sql-reference/data-types/string.md)) — Name of the source replica.
+
+- `new_part_name` ([String](../../sql-reference/data-types/string.md)) — Name of the new part.
+
+- `parts_to_merge` ([Array](../../sql-reference/data-types/array.md) ([String](../../sql-reference/data-types/string.md))) — Names of parts to merge or update.
+
+- `is_detach` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether the `DETACH_PARTS` task is in the queue.
+
+- `is_currently_executing` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether a specific task is being performed right now.
+
+- `num_tries` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of failed attempts to complete the task.
+
+- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
+
+- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
+
+- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
+
+- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.
+
+- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
+
+- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
+
+**Example**
+
+``` sql
+SELECT * FROM system.replication_queue LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+database:               merge
+table:                  visits_v2
+replica_name:           mtgiga001-1t.metrika.yandex.net
+position:               15
+node_name:              queue-0009325559
+type:                   MERGE_PARTS
+create_time:            2020-12-07 14:04:21
+required_quorum:        0
+source_replica:         mtgiga001-1t.metrika.yandex.net
+new_part_name:          20201130_121373_121384_2
+parts_to_merge:         ['20201130_121373_121378_1','20201130_121379_121379_0','20201130_121380_121380_0','20201130_121381_121381_0','20201130_121382_121382_0','20201130_121383_121383_0','20201130_121384_121384_0']
+is_detach:              0
+is_currently_executing: 0
+num_tries:              36
+last_exception:         Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build))
+last_attempt_time:      2020-12-08 17:35:54
+num_postponed:          0
+postpone_reason:
+last_postpone_time:     1970-01-01 03:00:00
+```
+
+**See Also**
+
+- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#query-language-system-replicated)
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replication_queue) <!--hide-->
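Beyond the single-row example, the table lends itself to quick health checks. A hypothetical monitoring query that surfaces repeatedly failing tasks, using only columns documented above:

``` sql
SELECT database, table, type, count() AS tasks, max(num_tries) AS max_tries
FROM system.replication_queue
GROUP BY database, table, type
ORDER BY max_tries DESC
LIMIT 10;
```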
docs/en/operations/tips.md
@@ -91,6 +91,23 @@ The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementati…

 Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.

+## Hypervisor configuration
+
+If you are using OpenStack, set
+```
+cpu_mode=host-passthrough
+```
+in nova.conf.
+
+If you are using libvirt, set
+```
+<cpu mode='host-passthrough'/>
+```
+in XML configuration.
+
+This is important for ClickHouse to be able to get correct information with the `cpuid` instruction.
+Otherwise you may get `Illegal instruction` crashes when the hypervisor is run on old CPU models.
+
 ## ZooKeeper {#zookeeper}

 You are probably already using ZooKeeper for other purposes. You can use the same installation of ZooKeeper, if it isn’t already overloaded.
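A quick way to confirm the guest actually sees the host's CPU features after enabling passthrough (the flag checked here is just an example):

``` bash
$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
```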
docs/en/sql-reference/aggregate-functions/combinators.md
@@ -25,6 +25,10 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a…

 -If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.

+## -SimpleState {#agg-functions-combinator-simplestate}
+
+If you apply this combinator, the aggregate function returns the same value but with a different type. This is a `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines.
+
 ## -State {#agg-functions-combinator-state}

 If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.
|
|||||||
|
|
||||||
Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted.
|
Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted.
|
||||||
|
|
||||||
|
## arrayMin(\[func,\] arr1, …) {#array-min}
|
||||||
|
|
||||||
|
Returns the sum of the `func` values. If the function is omitted, it just returns the min of the array elements.
|
||||||
|
|
||||||
|
Note that the `arrayMin` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
|
||||||
|
|
||||||
|
## arrayMax(\[func,\] arr1, …) {#array-max}
|
||||||
|
|
||||||
|
Returns the sum of the `func` values. If the function is omitted, it just returns the min of the array elements.
|
||||||
|
|
||||||
|
Note that the `arrayMax` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
|
||||||
|
|
||||||
## arraySum(\[func,\] arr1, …) {#array-sum}
|
## arraySum(\[func,\] arr1, …) {#array-sum}
|
||||||
|
|
||||||
Returns the sum of the `func` values. If the function is omitted, it just returns the sum of the array elements.
|
Returns the sum of the `func` values. If the function is omitted, it just returns the sum of the array elements.
|
||||||
|
|
||||||
Note that the `arraySum` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
|
Note that the `arraySum` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
|
||||||
|
|
||||||
|
## arrayAvg(\[func,\] arr1, …) {#array-avg}
|
||||||
|
|
||||||
|
Returns the sum of the `func` values. If the function is omitted, it just returns the average of the array elements.
|
||||||
|
|
||||||
|
Note that the `arrayAvg` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
|
||||||
|
|
||||||
## arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}
|
## arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}
|
||||||
|
|
||||||
Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by this function before summing.
|
Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by this function before summing.
|
||||||
|
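A few worked examples of the new functions, following the calling convention described above:

``` sql
SELECT
    arrayMin([1, 2, 4]) AS min_val,                 -- 1
    arrayMax(x -> x * 2, [1, 2, 4]) AS max_doubled, -- 8: max over doubled elements
    arrayAvg([1, 2, 4]) AS avg_val;                 -- 2.333...: (1 + 2 + 4) / 3
```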
docs/en/sql-reference/functions/type-conversion-functions.md
@@ -430,6 +430,63 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null

 - [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting

+## accurateCast(x, T) {#type_conversion_function-accurate-cast}
+
+Converts ‘x’ to the ‘T’ data type. The difference from `cast(x, T)` is that `accurateCast`
+does not allow overflow of numeric types during the cast if the value of x does not fit
+the bounds of type T.
+
+Example:
+
+``` sql
+SELECT cast(-1, 'UInt8') as uint8;
+```
+
+``` text
+┌─uint8─┐
+│   255 │
+└───────┘
+```
+
+``` sql
+SELECT accurateCast(-1, 'UInt8') as uint8;
+```
+
+``` text
+Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in column Int8 cannot be safely converted into type UInt8: While processing accurateCast(-1, 'UInt8') AS uint8.
+```
+
+## accurateCastOrNull(x, T) {#type_conversion_function-accurate-cast_or_null}
+
+Converts ‘x’ to the ‘T’ data type. Always returns a nullable type and returns NULL
+if the casted value is not representable in the target type.
+
+Example:
+
+``` sql
+SELECT
+    accurateCastOrNull(-1, 'UInt8') as uint8,
+    accurateCastOrNull(128, 'Int8') as int8,
+    accurateCastOrNull('Test', 'FixedString(2)') as fixed_string
+```
+
+``` text
+┌─uint8─┬─int8─┬─fixed_string─┐
+│  ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ         │
+└───────┴──────┴──────────────┘
+```
+
+``` sql
+SELECT toTypeName(accurateCastOrNull(5, 'UInt8'))
+```
+
+``` text
+┌─toTypeName(accurateCastOrNull(5, 'UInt8'))─┐
+│ Nullable(UInt8)                            │
+└────────────────────────────────────────────┘
+```
+
 ## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval}

 Converts a Number type argument to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.
docs/en/sql-reference/statements/system.md
@@ -204,7 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]

 ## Managing ReplicatedMergeTree Tables {#query-language-system-replicated}

-ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication/#table_engines-replication) tables.
+ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) tables.

 ### STOP FETCHES {#query_language-system-stop-fetches}
docs/es/interfaces/cli.md
@@ -59,7 +59,7 @@ The command line is based on ‘replxx’ (similar to ‘readline’). In other…

 By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, or by specifying `\G` at the end of the query, using the `--format` or `--vertical` argument on the command line, or using the client configuration file.

-To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
+To exit the client, press Ctrl+D, or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”

 When processing a query, the client shows:
docs/ja/interfaces/cli.md
@@ -59,7 +59,7 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR…

 By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, or by specifying `\G` at the end of the query, using the `--format` or `--vertical` argument on the command line, or using the client configuration file.

-To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
+To exit the client, press Ctrl+D, or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”

 When processing a query, the client shows:
docs/ru/faq/general/index.md (new file)
@@ -0,0 +1,25 @@
+---
+title: General questions about ClickHouse
+toc_hidden_folder: true
+toc_priority: 1
+toc_title: General questions
+---
+
+# General questions about ClickHouse {#obshchie-voprosy}
+
+Questions:
+
+- What is ClickHouse?
+- Why is ClickHouse so fast?
+- Who is using ClickHouse?
+- What does the name “ClickHouse” mean?
+- What does “не тормозит” (“doesn't slow down”) mean?
+- What is OLAP?
+- What is a columnar database?
+- [Why not use something like MapReduce?](mapreduce.md)
+
+!!! info "If you haven't found what you were looking for:"
+    Check the other F.A.Q. categories or search the rest of the documentation, using the table of contents on the left.
+
+{## [Original article](https://clickhouse.tech/docs/ru/faq/general/) ##}
docs/ru/faq/general/mapreduce.md
@@ -1,8 +1,12 @@
-# General questions {#obshchie-voprosy}
+---
+title: Why not use something like MapReduce?
+toc_hidden: true
+toc_priority: 110
+---
+
 ## Why not use something like MapReduce? {#pochemu-by-ne-ispolzovat-sistemy-tipa-mapreduce}

 We will refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most widespread open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its own solution, YT.

 Such systems are not suitable for online queries because of their high latency. In other words, they cannot be used as the back-end for a web interface.
 Such systems are not suitable for real-time data updates.
@@ -10,47 +14,3 @@
 Distributed sorting is the main reason simple map-reduce tasks are slow.

 Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP, since it lets you run investigations quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala, Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. However, the performance of such tasks is highly suboptimal compared to specialized systems, and the relatively high latency makes it impossible to use these systems as the back-end for a web interface.

-## What to do if I have a problem with encodings when using Oracle via ODBC? {#oracle-odbc-encodings}
-
-If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
-
-**Example**
-
-``` sql
-NLS_LANG=RUSSIAN_RUSSIA.UTF8
-```
-
-## How do I export data from ClickHouse to a file? {#how-to-export-to-file}
-
-### The INTO OUTFILE clause {#sektsiia-into-outfile}
-
-Add an [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file'
-```
-
-By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select a [data format](../interfaces/formats.md), use the [FORMAT clause](../sql-reference/statements/select/format.md#format-clause).
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
-```
-
-### A table with the File engine {#tablitsa-s-dvizhkom-file}
-
-See [File](../engines/table-engines/special/file.md).
-
-### Command-line redirection {#perenapravlenie-v-komandoi-stroke}
-
-``` sql
-$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
-```
-
-See [clickhouse-client](../interfaces/cli.md).
-
-[Original article](https://clickhouse.tech/docs/en/faq/general/) <!--hide-->
docs/ru/faq/index.md
@@ -4,3 +4,14 @@ toc_hidden: true
 toc_priority: 76
 ---
+
+# F.A.Q. contents {#soderzhanie}
+
+This section of the documentation collects the most frequently asked questions about ClickHouse.
+
+Categories:
+
+- **[General questions](../faq/general/index.md)**
+- **[Use cases](../faq/use-cases/index.md)**
+- **[Operations](../faq/operations/index.md)**
+- **[Integration](../faq/integration/index.md)**
docs/ru/faq/integration/file-export.md (new file)
@@ -0,0 +1,37 @@
+---
+title: How do I export data from ClickHouse to a file?
+toc_hidden: true
+toc_priority: 10
+---
+
+## How do I export data from ClickHouse to a file? {#how-to-export-to-file-rus}
+
+### The INTO OUTFILE clause {#sektsiia-into-outfile-rus}
+
+Add an [INTO OUTFILE](../../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file'
+```
+
+By default, ClickHouse uses the [TabSeparated](../../interfaces/formats.md#tabseparated) format for output data. To select a [data format](../../interfaces/formats.md), use the [FORMAT](../../sql-reference/statements/select/format.md#format-clause) clause.
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
+```
+
+## A table with the File engine {#using-a-file-engine-table}
+
+See [File](../../engines/table-engines/special/file.md).
+
+## Command-line redirection {#using-command-line-redirection}
+
+``` bash
+$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
+```
+
+See [clickhouse-client](../../interfaces/cli.md).
docs/ru/faq/integration/index.md (new file)
@@ -0,0 +1,19 @@
+---
+title: Questions about integrating ClickHouse and other systems
+toc_hidden_folder: true
+toc_priority: 4
+toc_title: Integration
+---
+
+# Questions about integrating ClickHouse with other systems {#question-about-integrating-clickhouse-and-other-systems-rus}
+
+Questions:
+
+- [How do I export data from ClickHouse to a file?](file-export.md)
+- How do I import JSON into ClickHouse?
+- [What if I have a problem with encodings when using Oracle via ODBC?](oracle-odbc.md)
+
+!!! info "If you haven't found what you were looking for"
+    Check the other F.A.Q. subsections or search the rest of the documentation, using the table of contents on the left.
+
+{## [Original article](https://clickhouse.tech/docs/ru/faq/integration/) ##}
docs/ru/faq/integration/oracle-odbc.md (new file)
@@ -0,0 +1,15 @@
+---
+title: What if I have a problem with encodings when using Oracle via ODBC?
+toc_hidden: true
+toc_priority: 20
+---
+
+## What if I have a problem with encodings when using Oracle via ODBC? {#oracle-odbc-encodings-rus}
+
+If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
+
+**Example**
+
+``` text
+NLS_LANG=RUSSIAN_RUSSIA.UTF8
+```
docs/ru/faq/operations/index.md (new file)
@@ -0,0 +1,18 @@
+---
+title: Questions about operating ClickHouse servers and clusters
+toc_hidden_folder: true
+toc_priority: 3
+toc_title: Operations
+---
+
+# Questions about operating ClickHouse servers and clusters {#voprosy-ob-operating-clickhouse-servers-and-clusters}
+
+Questions:
+
+- Which ClickHouse version to use in production?
+- Is it possible to delete old records from a ClickHouse table?
+
+!!! info "Don’t see what you were looking for?"
+    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.
+
+{## [Original article](https://clickhouse.tech/docs/en/faq/production/) ##}
docs/ru/faq/use-cases/index.md (new file)
@@ -0,0 +1,14 @@
+---
+title: Questions about ClickHouse use cases
+toc_hidden_folder: true
+toc_priority: 2
+toc_title: Use cases
+---
+
+# Questions about ClickHouse use cases {#voprosy-o-primenenii}
+
+Questions:
+
+- Can I use ClickHouse as a time-series database?
+- Can I use ClickHouse as a key-value storage?
docs/ru/interfaces/cli.md
@@ -63,7 +63,7 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR…

 By default, PrettyCompact is used as the output format (pretty tables). You can change the format with the FORMAT clause of the query, by specifying `\G` at the end of the query, with the `--format` or `--vertical` command-line argument, or with the client configuration file.

-To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй»
+To exit the client, press Ctrl+D, or enter one of the following instead of a query: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй»

 When executing a query, the client shows:
81
docs/ru/operations/system-tables/replication_queue.md
Normal file
81
docs/ru/operations/system-tables/replication_queue.md
Normal file
@ -0,0 +1,81 @@
# system.replication_queue {#system_tables-replication_queue}

Contains information about tasks from the replication queues stored in ZooKeeper for tables of the `ReplicatedMergeTree` family.

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — database name.

- `table` ([String](../../sql-reference/data-types/string.md)) — table name.

- `replica_name` ([String](../../sql-reference/data-types/string.md)) — replica name in ZooKeeper. Different replicas of the same table have different names.

- `position` ([UInt32](../../sql-reference/data-types/int-uint.md)) — position of the task in the queue.

- `node_name` ([String](../../sql-reference/data-types/string.md)) — node name in ZooKeeper.

- `type` ([String](../../sql-reference/data-types/string.md)) — type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — date and time when the task was submitted for execution.

- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — number of replicas waiting for the task to complete, with confirmation of completion. This column is only relevant for the `GET_PARTS` task.

- `source_replica` ([String](../../sql-reference/data-types/string.md)) — name of the source replica.

- `new_part_name` ([String](../../sql-reference/data-types/string.md)) — name of the new part.

- `parts_to_merge` ([Array](../../sql-reference/data-types/array.md) ([String](../../sql-reference/data-types/string.md))) — names of the parts to merge or update.

- `is_detach` ([UInt8](../../sql-reference/data-types/int-uint.md)) — flag indicating whether a `DETACH_PARTS` task is in the queue.

- `is_currently_executing` ([UInt8](../../sql-reference/data-types/int-uint.md)) — flag indicating whether the specific task is being executed right now.

- `num_tries` ([UInt32](../../sql-reference/data-types/int-uint.md)) — number of failed attempts to execute the task.

- `last_exception` ([String](../../sql-reference/data-types/string.md)) — text of the message about the last error that occurred, if any.

- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — date and time of the last attempt to execute the task.

- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — number of times the task was postponed.

- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — reason why the task was postponed.

- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — date and time when the task was last postponed.

- `merge_type` ([String](../../sql-reference/data-types/string.md)) — type of the current merge. Empty if it is a mutation.

**Example**

``` sql
SELECT * FROM system.replication_queue LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
database: merge
table: visits_v2
replica_name: mtgiga001-1t.metrika.yandex.net
position: 15
node_name: queue-0009325559
type: MERGE_PARTS
create_time: 2020-12-07 14:04:21
required_quorum: 0
source_replica: mtgiga001-1t.metrika.yandex.net
new_part_name: 20201130_121373_121384_2
parts_to_merge: ['20201130_121373_121378_1','20201130_121379_121379_0','20201130_121380_121380_0','20201130_121381_121381_0','20201130_121382_121382_0','20201130_121383_121383_0','20201130_121384_121384_0']
is_detach: 0
is_currently_executing: 0
num_tries: 36
last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build))
last_attempt_time: 2020-12-08 17:35:54
num_postponed: 0
postpone_reason:
last_postpone_time: 1970-01-01 03:00:00
```

**See Also**

- [Managing ReplicatedMergeTree tables](../../sql-reference/statements/system.md/#query-language-system-replicated)

[Original article](https://clickhouse.tech/docs/ru/operations/system_tables/replication_queue) <!--hide-->
@@ -199,7 +199,7 @@ SOURCE(ODBC(

ClickHouse receives quoting information from the ODBC driver and quotes settings in queries to the driver, so the table name must be specified to match the case of the table name in the database.

-If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/general.md#oracle-odbc-encodings) section.
+If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) section.

### Known vulnerability in the functioning of ODBC dictionaries {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}
@@ -593,6 +593,18 @@ SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-0

For example, `timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600)) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`.
This is needed for searching for hits that are included in the corresponding visit.

+## toYYYYMM
+
+Converts a date or date with time to a UInt32 number containing the year and month number (YYYY * 100 + MM).
+
+## toYYYYMMDD
+
+Converts a date or date with time to a UInt32 number containing the year, month, and day number (YYYY * 10000 + MM * 100 + DD).
+
+## toYYYYMMDDhhmmss
+
+Converts a date or date with time to a UInt64 number containing the year, month, day, and time (YYYY * 10000000000 + MM * 100000000 + DD * 1000000 + hh * 10000 + mm * 100 + ss).
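The arithmetic in these three new sections is easy to verify; a minimal illustration, with values computed directly from the formulas above:

``` sql
SELECT
    toYYYYMM(toDate('2012-01-01')),                      -- 201201 = 2012 * 100 + 1
    toYYYYMMDD(toDate('2012-01-01')),                    -- 20120101 = 2012 * 10000 + 1 * 100 + 1
    toYYYYMMDDhhmmss(toDateTime('2012-01-01 12:20:00')); -- 20120101122000
```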
## formatDateTime {#formatdatetime}

The function converts a date-and-time to a string according to the given template. Important: the template is a constant expression, so you cannot use different templates within a single column.
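A small sketch of the constant-template rule above; the `%Y`, `%m`, `%d`, `%H`, `%M`, `%S` specifiers are assumed to be available as in the documented format set:

``` sql
SELECT formatDateTime(toDateTime('2020-01-02 03:04:05'), '%Y-%m-%d %H:%M:%S');
-- returns the string '2020-01-02 03:04:05'
```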
@@ -59,7 +59,7 @@ Komut satırı dayanmaktadır ‘replxx’ (benzer ‘readline’)

By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, by specifying `\G` at the end of the query, using the `--format` or `--vertical` command-line argument, or using the client configuration file.

-To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
+To exit the client, press Ctrl+D, or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”

When processing a query, the client shows:
@@ -9,19 +9,21 @@ toc_title: "\u5176\u4ED6"

## ATTACH {#attach}

-This query is exactly the same as `CREATE`, but
+Similar to `CREATE`, but with these differences:

-- instead of the word `CREATE` it uses the word `ATTACH`;
+- the keyword `ATTACH` is used;
-- the query does not create data on disk, but assumes that the data is already in the appropriate place, and just adds information about the table to the server.
+- the query does not create data on disk; it assumes the data is already stored in the corresponding place, and adds the information about the table to the server.
-After executing the attach query, the server will know that the table exists.
+After executing an `ATTACH` query, the server will know that the table has been created.

-If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure.
+If the table was previously detached (`DETACH`), meaning that its structure is known, you can use an abbreviated form to attach it, without specifying the schema details of the table structure.

``` sql
ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
```

-This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server).
+This query is triggered automatically when the server starts.
+
+The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch. Some tables are exceptions, such as system tables, which are explicitly specified on the server.

## CHECK TABLE {#check-table}
@@ -31,13 +33,12 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
CHECK TABLE [db.]name
```

-The `CHECK TABLE` query compares actual file sizes with the expected values stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
+A `CHECK TABLE` query compares the actual file sizes stored on the server with the expected values. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.

-The query response contains the `result` column with a single row. The value of the row is of
-[Boolean](../../sql-reference/data-types/boolean.md) type:
+The query returns a single-row result with the column name `result`; the value of this row is of [Boolean](../../sql-reference/data-types/boolean.md) type:

-- 0 — the data in the table is corrupted.
+- 0 — the data in the table is corrupted;
-- 1 — the data retains integrity.
+- 1 — the data retains integrity;

The `CHECK TABLE` query supports the following table engines:
@@ -56,13 +57,14 @@ CHECK TABLE [db.]name

If the table is corrupted, you can copy the non-corrupted data to another table. To do this (see the sketch after this list):

-1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
+1. Create a new table with the same structure as the damaged table. Execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
2. Set the [max_threads](../../operations/settings/settings.md#settings-max_threads) value to 1 so that the next query is processed in a single thread. To do this, run the query `SET max_threads = 1`.
3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This request copies the non-corrupted data from the damaged table into another table. Only the data before the corrupted part will be copied.
4. Restart `clickhouse-client` to reset the `max_threads` value.
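A compact sketch of the recovery steps above, with hypothetical table names (`damaged`, `recovered`):

``` sql
-- 1. Create an empty table with the same structure as the damaged one.
CREATE TABLE recovered AS damaged;

-- 2. Process the copy in a single thread.
SET max_threads = 1;

-- 3. Copy the readable data; rows after the corrupted part are not copied.
INSERT INTO recovered SELECT * FROM damaged;

-- 4. Restart clickhouse-client afterwards to reset max_threads.
```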
## DESCRIBE TABLE {#misc-describe-table}

+Shows the description of a table, returning the schema of each column; the syntax is:

``` sql
DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
```
|
|||||||
- `type`— 列的类型。
|
- `type`— 列的类型。
|
||||||
- `default_type` — [默认表达式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` 或 `ALIAS`)中使用的子句。 如果没有指定默认表达式,则列包含一个空字符串。
|
- `default_type` — [默认表达式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` 或 `ALIAS`)中使用的子句。 如果没有指定默认表达式,则列包含一个空字符串。
|
||||||
- `default_expression` — `DEFAULT` 子句中指定的值。
|
- `default_expression` — `DEFAULT` 子句中指定的值。
|
||||||
- `comment_expression` — 注释。
|
- `comment_expression` — 注释信息。
|
||||||
|
|
||||||
嵌套数据结构以 “expanded” 格式输出。 每列分别显示,列名后加点号。
|
嵌套数据结构以 “expanded” 格式输出。 每列分别显示,列名后加点号。
|
||||||
|
|
||||||
## DETACH {#detach}
|
## DETACH {#detach}
|
||||||
|
|
||||||
从服务器中删除有关 ‘name’ 表的信息。 服务器停止了解该表的存在。
|
从服务器中删除目标表信息(删除对象是表), 执行查询后,服务器视作该表已经不存在。
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||||
```
|
```
|
||||||
|
|
||||||
这不会删除表的数据或元数据。 在下一次服务器启动时,服务器将读取元数据并再次查找该表。
|
这不会删除表的数据或元数据。 在下一次服务器启动时,服务器将读取元数据并再次查找该表。
|
||||||
同样,可以使用 `ATTACH` 查询重新连接一个 “detached” 的表(系统表除外,没有为它们存储元数据)。
|
也可以不停止服务器的情况下,使用前面介绍的 `ATTACH` 查询来重新关联该表(系统表除外,没有为它们存储元数据)。
|
||||||
|
|
||||||
## DROP {#drop}

Deletes an existing entity. If `IF EXISTS` is specified, no error is returned if the entity does not exist.
+It is recommended to add the `IF EXISTS` modifier when using this query.

## DROP DATABASE {#drop-database}
@@ -135,7 +138,7 @@ DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]

Deletes a role.

-A deleted role is revoked from all the entities it was granted to.
+At the same time, the privileges held by the role are also revoked.

Syntax:
@@ -199,6 +202,8 @@ EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT

## KILL QUERY {#kill-query-statement}

+
+
``` sql
KILL QUERY [ON CLUSTER cluster]
WHERE <where expression to SELECT FROM system.processes query>
@@ -219,16 +224,17 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
KILL QUERY WHERE user='username' SYNC
```

-Read-only users can only stop their own queries.
+Read-only users can only stop queries they submitted themselves.

-By default, the asynchronous version of the query (`ASYNC`) is used, which does not wait for confirmation that the query has stopped.
+By default, the asynchronous version of the query (`ASYNC`) is used, which does not need to wait for confirmation that the query has stopped.

-The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops.
-The response contains the `kill_status` column, which can take the following values:
+In contrast, terminating with the synchronous version (`SYNC`) displays information at each step as queries are stopped.
+The returned information contains the `kill_status` column, which can take the following values:

1. ‘finished’ – the query was terminated successfully.
2. ‘waiting’ – waiting for the query to end after sending it a signal to terminate.
-3. Other values explain why the query cannot be stopped.
+3. Other values, which explain why the query cannot be stopped.

A test query (`TEST`) only checks the user's rights and displays a list of queries to stop.
@@ -14,7 +14,7 @@ toc_title: LIMIT

## LIMIT … WITH TIES modifier {#limit-with-ties}

-If `WITH TIES` is set for `LIMIT n[,m]` and `ORDER BY expr_list` is declared, you will get in the result the first `n` (or `n,m`) rows and all rows whose `ORDER BY` field values equal those of the row at position `n` for `LIMIT n` (and `m` for `LIMIT n,m`).
+If `WITH TIES` is set for `LIMIT n[,m]` and `ORDER BY expr_list` is declared, then in addition to the unmodified result (the first `n` rows for a plain `LIMIT n`), the query also returns rows whose sort-key fields equal those of row `n` (that is, if row `n+1` has the same `ORDER BY` field values as row `n`, it is returned as well).

This modifier can be combined with the [ORDER BY … WITH FILL modifier](../../../sql-reference/statements/select/order-by.md#orderby-with-fill).
@@ -38,7 +38,7 @@ SELECT * FROM (
└───┘
```

-After executing the `WITH TIES` modifier
+After adding the `WITH TIES` modifier

``` sql
SELECT * FROM (
|
|||||||
└───┘
|
└───┘
|
||||||
```
|
```
|
||||||
|
|
||||||
cause row number 6 have same value “2” for field `n` as row number 5
|
虽然指定了`LIMIT 5`, 但第6行的`n`字段值为2,与第5行相同,因此也作为满足条件的记录返回。
|
||||||
|
简而言之,该修饰符可理解为是否增加“并列行”的数据。
|
||||||
|
|
||||||
|
``` sql,
|
||||||
|
``` sql
|
||||||
|
@@ -2346,7 +2346,7 @@ public:
        "Suggestion limit for how many databases, tables and columns to fetch.")
    ("multiline,m", "multiline")
    ("multiquery,n", "multiquery")
-    ("queries-file,qf", po::value<std::string>(), "file path with queries to execute")
+    ("queries-file", po::value<std::string>(), "file path with queries to execute")
    ("format,f", po::value<std::string>(), "default output format")
    ("testmode,T", "enable test hints in comments")
    ("ignore-error", "do not stop processing in multiquery mode")
@@ -115,7 +115,7 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
    std::string name = schema_name.empty() ? backQuoteIfNeed(table_name) : backQuoteIfNeed(schema_name) + "." + backQuoteIfNeed(table_name);
    WriteBufferFromOwnString buf;
    std::string input = "SELECT * FROM " + name + " WHERE 1 = 0";
-    ParserQueryWithOutput parser;
+    ParserQueryWithOutput parser(input.data() + input.size());
    ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth);

    IAST::FormatSettings settings(buf, true);
@@ -139,6 +139,28 @@ void setupTmpPath(Poco::Logger * log, const std::string & path)
    }
}

+int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
+{
+    const int sleep_max_ms = 1000 * seconds_to_wait;
+    const int sleep_one_ms = 100;
+    int sleep_current_ms = 0;
+    int current_connections = 0;
+    while (sleep_current_ms < sleep_max_ms)
+    {
+        current_connections = 0;
+        for (auto & server : servers)
+        {
+            server.stop();
+            current_connections += server.currentConnections();
+        }
+        if (!current_connections)
+            break;
+        sleep_current_ms += sleep_one_ms;
+        std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
+    }
+    return current_connections;
+}

}

namespace DB
@@ -366,7 +388,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
    LOG_WARNING(log, "Server was built in debug mode. It will work slowly.");
#endif

-#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
+#if defined(SANITIZER)
    LOG_WARNING(log, "Server was built with sanitizer. It will work slowly.");
#endif
@@ -794,8 +816,29 @@ int Server::main(const std::vector<std::string> & /*args*/)

    LOG_DEBUG(log, "Shut down storages.");

-    for (auto & server : servers_to_start_before_tables)
-        server.stop();
+    if (!servers_to_start_before_tables.empty())
+    {
+        LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
+        int current_connections = 0;
+        for (auto & server : servers_to_start_before_tables)
+        {
+            server.stop();
+            current_connections += server.currentConnections();
+        }
+
+        if (current_connections)
+            LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
+        else
+            LOG_INFO(log, "Closed all listening sockets.");
+
+        if (current_connections > 0)
+            current_connections = waitServersToFinish(servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5));
+
+        if (current_connections)
+            LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections);
+        else
+            LOG_INFO(log, "Closed connections to servers for tables.");
+    }

    /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
      * At this moment, no one could own shared part of Context.
|
|||||||
global_context->getProcessList().killAllQueries();
|
global_context->getProcessList().killAllQueries();
|
||||||
|
|
||||||
if (current_connections)
|
if (current_connections)
|
||||||
{
|
current_connections = waitServersToFinish(servers, config().getInt("shutdown_wait_unfinished", 5));
|
||||||
const int sleep_max_ms = 1000 * config().getInt("shutdown_wait_unfinished", 5);
|
|
||||||
const int sleep_one_ms = 100;
|
|
||||||
int sleep_current_ms = 0;
|
|
||||||
while (sleep_current_ms < sleep_max_ms)
|
|
||||||
{
|
|
||||||
current_connections = 0;
|
|
||||||
for (auto & server : servers)
|
|
||||||
{
|
|
||||||
server.stop();
|
|
||||||
current_connections += server.currentConnections();
|
|
||||||
}
|
|
||||||
if (!current_connections)
|
|
||||||
break;
|
|
||||||
sleep_current_ms += sleep_one_ms;
|
|
||||||
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (current_connections)
|
if (current_connections)
|
||||||
LOG_INFO(log, "Closed connections. But {} remain."
|
LOG_INFO(log, "Closed connections. But {} remain."
|
||||||
|
@@ -392,9 +392,12 @@ bool ContextAccess::checkAccessImpl2(const AccessFlags & flags, const Args &...
    if (!getUser())
        return access_denied("User has been dropped", ErrorCodes::UNKNOWN_USER);

-    /// If the current user was allowed to create a temporary table
-    /// then he is allowed to do with it whatever he wants.
-    if ((sizeof...(args) >= 2) && (getDatabase(args...) == DatabaseCatalog::TEMPORARY_DATABASE))
+    /// Access to temporary tables is controlled in an unusual way, not like normal tables.
+    /// Creating of temporary tables is controlled by AccessType::CREATE_TEMPORARY_TABLES grant,
+    /// and other grants are considered as always given.
+    /// The DatabaseCatalog class won't resolve StorageID for temporary tables
+    /// which shouldn't be accessed.
+    if (getDatabase(args...) == DatabaseCatalog::TEMPORARY_DATABASE)
        return access_granted();

    auto acs = getAccessRightsWithImplicit();
@@ -156,6 +156,25 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
    {
        const String & setting_name = change.name;

+        if (setting_name == "profile")
+        {
+            /// TODO Check profile settings in Context::setProfile(...), not here. It will be backward incompatible.
+            const String & profile_name = change.value.safeGet<String>();
+            const auto & profile_settings_changes = manager->getProfileSettings(profile_name);
+            try
+            {
+                /// NOTE We cannot use CLAMP_ON_VIOLATION here, because we cannot modify elements of profile_settings_changes
+                for (auto change_copy : *profile_settings_changes)
+                    checkImpl(current_settings, change_copy, THROW_ON_VIOLATION);
+            }
+            catch (Exception & e)
+            {
+                e.addMessage(", while trying to set settings profile {}", profile_name);
+                throw;
+            }
+            return true;
+        }
+
        bool cannot_cast;
        auto cast_value = [&](const Field & x) -> Field
        {
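With this change, applying a profile via `SET` validates every setting the profile carries against the session's constraints, and a violation surfaces with the profile name appended to the error. A sketch, assuming a profile named `web` exists in the server configuration:

``` sql
-- Each setting from the 'web' profile is checked individually; a constraint
-- violation fails with "... while trying to set settings profile web".
SET profile = 'web';
```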
@@ -18,6 +18,7 @@
#include <Poco/String.h>
#include "registerAggregateFunctions.h"

+#include <Functions/FunctionFactory.h>

namespace DB
{
@@ -135,12 +136,17 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
        return combinator->transformAggregateFunction(nested_function, out_properties, argument_types, parameters);
    }

+    String extra_info;
+    if (FunctionFactory::instance().hasNameOrAlias(name))
+        extra_info = ". There is an ordinary function with the same name, but aggregate function is expected here";
+
    auto hints = this->getHints(name);
    if (!hints.empty())
-        throw Exception(fmt::format("Unknown aggregate function {}. Maybe you meant: {}", name, toString(hints)),
-            ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
+        throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION,
+            "Unknown aggregate function {}{}. Maybe you meant: {}", name, extra_info, toString(hints));
    else
-        throw Exception(fmt::format("Unknown aggregate function {}", name), ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
+        throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
}
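One hypothetical way to surface the new hint; whether this exact statement reaches the factory lookup depends on the resolution path, so treat it as a sketch rather than a confirmed reproduction:

``` sql
-- 'length' exists only as an ordinary function, so resolving it as an
-- aggregate should now fail with "Unknown aggregate function length.
-- There is an ordinary function with the same name, but aggregate
-- function is expected here".
CREATE TABLE t (x SimpleAggregateFunction(length, String)) ENGINE = Memory;
```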
src/AggregateFunctions/AggregateFunctionSimpleState.cpp (new file, 32 lines)
@@ -0,0 +1,32 @@
#include <AggregateFunctions/AggregateFunctionCombinatorFactory.h>
#include <AggregateFunctions/AggregateFunctionSimpleState.h>

namespace DB
{
namespace
{
class AggregateFunctionCombinatorSimpleState final : public IAggregateFunctionCombinator
{
public:
    String getName() const override { return "SimpleState"; }

    DataTypes transformArguments(const DataTypes & arguments) const override { return arguments; }

    AggregateFunctionPtr transformAggregateFunction(
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array & params) const override
    {
        return std::make_shared<AggregateFunctionSimpleState>(nested_function, arguments, params);
    }
};

}

void registerAggregateFunctionCombinatorSimpleState(AggregateFunctionCombinatorFactory & factory)
{
    factory.registerCombinator(std::make_shared<AggregateFunctionCombinatorSimpleState>());
}

}
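Taken together with the header that follows, the new combinator should behave roughly like this (a sketch, assuming the combinator is registered as shown above):

``` sql
SELECT toTypeName(maxSimpleState(number)) FROM numbers(10);
-- expected return type: SimpleAggregateFunction(max, UInt64)
```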
src/AggregateFunctions/AggregateFunctionSimpleState.h (new file, 77 lines)
@@ -0,0 +1,77 @@
#pragma once

#include <AggregateFunctions/IAggregateFunction.h>
#include <DataTypes/DataTypeCustomSimpleAggregateFunction.h>
#include <DataTypes/DataTypeFactory.h>

namespace DB
{

/** Not an aggregate function, but an adapter of aggregate functions.
  * Aggregate functions with the `SimpleState` suffix are almost identical to the corresponding ones,
  * except that the return type becomes DataTypeCustomSimpleAggregateFunction.
  */
class AggregateFunctionSimpleState final : public IAggregateFunctionHelper<AggregateFunctionSimpleState>
{
private:
    AggregateFunctionPtr nested_func;
    DataTypes arguments;
    Array params;

public:
    AggregateFunctionSimpleState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_)
        : IAggregateFunctionHelper<AggregateFunctionSimpleState>(arguments_, params_)
        , nested_func(nested_)
        , arguments(arguments_)
        , params(params_)
    {
    }

    String getName() const override { return nested_func->getName() + "SimpleState"; }

    DataTypePtr getReturnType() const override
    {
        DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(nested_func);
        // Need to make a clone because it'll be customized.
        auto storage_type = DataTypeFactory::instance().get(nested_func->getReturnType()->getName());
        DataTypeCustomNamePtr custom_name
            = std::make_unique<DataTypeCustomSimpleAggregateFunction>(nested_func, DataTypes{nested_func->getReturnType()}, params);
        storage_type->setCustomization(std::make_unique<DataTypeCustomDesc>(std::move(custom_name), nullptr));
        return storage_type;
    }

    void create(AggregateDataPtr place) const override { nested_func->create(place); }

    void destroy(AggregateDataPtr place) const noexcept override { nested_func->destroy(place); }

    bool hasTrivialDestructor() const override { return nested_func->hasTrivialDestructor(); }

    size_t sizeOfData() const override { return nested_func->sizeOfData(); }

    size_t alignOfData() const override { return nested_func->alignOfData(); }

    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
        nested_func->add(place, columns, row_num, arena);
    }

    void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override { nested_func->merge(place, rhs, arena); }

    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override { nested_func->serialize(place, buf); }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override
    {
        nested_func->deserialize(place, buf, arena);
    }

    void insertResultInto(AggregateDataPtr place, IColumn & to, Arena * arena) const override
    {
        nested_func->insertResultInto(place, to, arena);
    }

    bool allocatesMemoryInArena() const override { return nested_func->allocatesMemoryInArena(); }

    AggregateFunctionPtr getNestedFunction() const { return nested_func; }
};

}
@@ -47,6 +47,7 @@ class AggregateFunctionCombinatorFactory;
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
void registerAggregateFunctionCombinatorArray(AggregateFunctionCombinatorFactory &);
void registerAggregateFunctionCombinatorForEach(AggregateFunctionCombinatorFactory &);
+void registerAggregateFunctionCombinatorSimpleState(AggregateFunctionCombinatorFactory &);
void registerAggregateFunctionCombinatorState(AggregateFunctionCombinatorFactory &);
void registerAggregateFunctionCombinatorMerge(AggregateFunctionCombinatorFactory &);
void registerAggregateFunctionCombinatorNull(AggregateFunctionCombinatorFactory &);
|
|||||||
registerAggregateFunctionCombinatorIf(factory);
|
registerAggregateFunctionCombinatorIf(factory);
|
||||||
registerAggregateFunctionCombinatorArray(factory);
|
registerAggregateFunctionCombinatorArray(factory);
|
||||||
registerAggregateFunctionCombinatorForEach(factory);
|
registerAggregateFunctionCombinatorForEach(factory);
|
||||||
|
registerAggregateFunctionCombinatorSimpleState(factory);
|
||||||
registerAggregateFunctionCombinatorState(factory);
|
registerAggregateFunctionCombinatorState(factory);
|
||||||
registerAggregateFunctionCombinatorMerge(factory);
|
registerAggregateFunctionCombinatorMerge(factory);
|
||||||
registerAggregateFunctionCombinatorNull(factory);
|
registerAggregateFunctionCombinatorNull(factory);
|
||||||
|
@@ -41,6 +41,7 @@ SRCS(
    AggregateFunctionRetention.cpp
    AggregateFunctionSequenceMatch.cpp
    AggregateFunctionSimpleLinearRegression.cpp
+    AggregateFunctionSimpleState.cpp
    AggregateFunctionState.cpp
    AggregateFunctionStatistics.cpp
    AggregateFunctionStatisticsSimple.cpp
@@ -88,6 +88,10 @@ if (USE_AWS_S3)
    add_headers_and_sources(dbms Disks/S3)
endif()

+if (USE_HDFS)
+    add_headers_and_sources(dbms Storages/HDFS)
+endif()

list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD})
list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON})
@@ -389,8 +393,8 @@ if (USE_GRPC)
endif()

if (USE_HDFS)
-    target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY})
-    target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR})
+    dbms_target_link_libraries(PRIVATE ${HDFS3_LIBRARY})
+    dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR})
endif()

if (USE_AWS_S3)
@@ -5,8 +5,9 @@
#include <Poco/Net/StreamSocket.h>

#include <Common/Throttler.h>
-#include <Common/config.h>
+#if !defined(ARCADIA_BUILD)
+#    include <Common/config.h>
+#endif
#include <Core/Block.h>
#include <Core/Defines.h>
#include <IO/Progress.h>
src/Columns/ColumnMap.cpp (new file, 241 lines)
@@ -0,0 +1,241 @@
#include <Columns/ColumnMap.h>
#include <Columns/IColumnImpl.h>
#include <DataStreams/ColumnGathererStream.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <ext/map.h>
#include <ext/range.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Core/Field.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_COLUMN;
    extern const int NOT_IMPLEMENTED;
    extern const int LOGICAL_ERROR;
}


std::string ColumnMap::getName() const
{
    WriteBufferFromOwnString res;
    const auto & nested_tuple = getNestedData();
    res << "Map(" << nested_tuple.getColumn(0).getName()
        << ", " << nested_tuple.getColumn(1).getName() << ")";

    return res.str();
}

ColumnMap::ColumnMap(MutableColumnPtr && nested_)
    : nested(std::move(nested_))
{
    const auto * column_array = typeid_cast<const ColumnArray *>(nested.get());
    if (!column_array)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnMap can be created only from array of tuples");

    const auto * column_tuple = typeid_cast<const ColumnTuple *>(column_array->getDataPtr().get());
    if (!column_tuple)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnMap can be created only from array of tuples");

    if (column_tuple->getColumns().size() != 2)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnMap should contain only 2 subcolumns: keys and values");

    for (const auto & column : column_tuple->getColumns())
        if (isColumnConst(*column))
            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "ColumnMap cannot have ColumnConst as its element");
}

MutableColumnPtr ColumnMap::cloneEmpty() const
{
    return ColumnMap::create(nested->cloneEmpty());
}

MutableColumnPtr ColumnMap::cloneResized(size_t new_size) const
{
    return ColumnMap::create(nested->cloneResized(new_size));
}

Field ColumnMap::operator[](size_t n) const
{
    auto array = DB::get<Array>((*nested)[n]);
    return Map(std::make_move_iterator(array.begin()), std::make_move_iterator(array.end()));
}

void ColumnMap::get(size_t n, Field & res) const
{
    const auto & offsets = getNestedColumn().getOffsets();
    size_t offset = offsets[n - 1];
    size_t size = offsets[n] - offsets[n - 1];

    res = Map(size);
    auto & map = DB::get<Map &>(res);

    for (size_t i = 0; i < size; ++i)
        getNestedData().get(offset + i, map[i]);
}

StringRef ColumnMap::getDataAt(size_t) const
{
    throw Exception("Method getDataAt is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}

void ColumnMap::insertData(const char *, size_t)
{
    throw Exception("Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}

void ColumnMap::insert(const Field & x)
{
    const auto & map = DB::get<const Map &>(x);
    nested->insert(Array(map.begin(), map.end()));
}

void ColumnMap::insertDefault()
{
    nested->insertDefault();
}

void ColumnMap::popBack(size_t n)
{
    nested->popBack(n);
}

StringRef ColumnMap::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
{
    return nested->serializeValueIntoArena(n, arena, begin);
}

const char * ColumnMap::deserializeAndInsertFromArena(const char * pos)
{
    return nested->deserializeAndInsertFromArena(pos);
}

void ColumnMap::updateHashWithValue(size_t n, SipHash & hash) const
{
    nested->updateHashWithValue(n, hash);
}

void ColumnMap::updateWeakHash32(WeakHash32 & hash) const
{
    nested->updateWeakHash32(hash);
}

void ColumnMap::updateHashFast(SipHash & hash) const
{
    nested->updateHashFast(hash);
}

void ColumnMap::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
    nested->insertRangeFrom(
        assert_cast<const ColumnMap &>(src).getNestedColumn(),
        start, length);
}

ColumnPtr ColumnMap::filter(const Filter & filt, ssize_t result_size_hint) const
{
    auto filtered = nested->filter(filt, result_size_hint);
    return ColumnMap::create(filtered);
}

ColumnPtr ColumnMap::permute(const Permutation & perm, size_t limit) const
{
    auto permuted = nested->permute(perm, limit);
    return ColumnMap::create(std::move(permuted));
}

ColumnPtr ColumnMap::index(const IColumn & indexes, size_t limit) const
{
    auto res = nested->index(indexes, limit);
    return ColumnMap::create(std::move(res));
}

ColumnPtr ColumnMap::replicate(const Offsets & offsets) const
{
    auto replicated = nested->replicate(offsets);
    return ColumnMap::create(std::move(replicated));
}

MutableColumns ColumnMap::scatter(ColumnIndex num_columns, const Selector & selector) const
{
    auto scattered_columns = nested->scatter(num_columns, selector);
    MutableColumns res;
    res.reserve(num_columns);
    for (auto && scattered : scattered_columns)
        res.push_back(ColumnMap::create(std::move(scattered)));

    return res;
}

int ColumnMap::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
{
    const auto & rhs_map = assert_cast<const ColumnMap &>(rhs);
    return nested->compareAt(n, m, rhs_map.getNestedColumn(), nan_direction_hint);
}

void ColumnMap::compareColumn(const IColumn & rhs, size_t rhs_row_num,
    PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
    int direction, int nan_direction_hint) const
{
    return doCompareColumn<ColumnMap>(assert_cast<const ColumnMap &>(rhs), rhs_row_num, row_indexes,
        compare_results, direction, nan_direction_hint);
}

void ColumnMap::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
    nested->getPermutation(reverse, limit, nan_direction_hint, res);
}

void ColumnMap::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const
{
    nested->updatePermutation(reverse, limit, nan_direction_hint, res, equal_range);
}

void ColumnMap::gather(ColumnGathererStream & gatherer)
{
    gatherer.gather(*this);
}

void ColumnMap::reserve(size_t n)
{
    nested->reserve(n);
}

size_t ColumnMap::byteSize() const
{
    return nested->byteSize();
}

size_t ColumnMap::allocatedBytes() const
{
    return nested->allocatedBytes();
}

void ColumnMap::protect()
{
    nested->protect();
}

void ColumnMap::getExtremes(Field & min, Field & max) const
{
    nested->getExtremes(min, max);
}

void ColumnMap::forEachSubcolumn(ColumnCallback callback)
{
    nested->forEachSubcolumn(callback);
}

bool ColumnMap::structureEquals(const IColumn & rhs) const
{
    if (const auto * rhs_map = typeid_cast<const ColumnMap *>(&rhs))
        return nested->structureEquals(*rhs_map->nested);
    return false;
}

}
src/Columns/ColumnMap.h (new file, 92 lines)
@@ -0,0 +1,92 @@
#pragma once

#include <Core/Block.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnTuple.h>

namespace DB
{

/** Column, that stores a nested Array(Tuple(key, value)) column.
  */
class ColumnMap final : public COWHelper<IColumn, ColumnMap>
{
private:
    friend class COWHelper<IColumn, ColumnMap>;

    WrappedPtr nested;

    explicit ColumnMap(MutableColumnPtr && nested_);

    ColumnMap(const ColumnMap &) = default;

public:
    /** Create immutable column using immutable arguments. This arguments may be shared with other columns.
      * Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
      */
    using Base = COWHelper<IColumn, ColumnMap>;

    static Ptr create(const ColumnPtr & keys, const ColumnPtr & values, const ColumnPtr & offsets)
    {
        auto nested_column = ColumnArray::create(ColumnTuple::create(Columns{keys, values}), offsets);
        return ColumnMap::create(nested_column);
    }

    static Ptr create(const ColumnPtr & column) { return ColumnMap::create(column->assumeMutable()); }
    static Ptr create(ColumnPtr && arg) { return create(arg); }

    template <typename Arg, typename = typename std::enable_if<std::is_rvalue_reference<Arg &&>::value>::type>
    static MutablePtr create(Arg && arg) { return Base::create(std::forward<Arg>(arg)); }

    std::string getName() const override;
    const char * getFamilyName() const override { return "Map"; }
    TypeIndex getDataType() const override { return TypeIndex::Map; }

    MutableColumnPtr cloneEmpty() const override;
    MutableColumnPtr cloneResized(size_t size) const override;

    size_t size() const override { return nested->size(); }

    Field operator[](size_t n) const override;
    void get(size_t n, Field & res) const override;

    StringRef getDataAt(size_t n) const override;
    void insertData(const char * pos, size_t length) override;
    void insert(const Field & x) override;
    void insertDefault() override;
    void popBack(size_t n) override;
    StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
    const char * deserializeAndInsertFromArena(const char * pos) override;
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    void updateWeakHash32(WeakHash32 & hash) const override;
    void updateHashFast(SipHash & hash) const override;
    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
    ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
    ColumnPtr permute(const Permutation & perm, size_t limit) const override;
    ColumnPtr index(const IColumn & indexes, size_t limit) const override;
    ColumnPtr replicate(const Offsets & offsets) const override;
    MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
    void gather(ColumnGathererStream & gatherer_stream) override;
    int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
    void compareColumn(const IColumn & rhs, size_t rhs_row_num,
        PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
        int direction, int nan_direction_hint) const override;
    void getExtremes(Field & min, Field & max) const override;
    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const override;
    void reserve(size_t n) override;
    size_t byteSize() const override;
    size_t allocatedBytes() const override;
    void protect() override;
    void forEachSubcolumn(ColumnCallback callback) override;
    bool structureEquals(const IColumn & rhs) const override;

    const ColumnArray & getNestedColumn() const { return assert_cast<const ColumnArray &>(*nested); }
    ColumnArray & getNestedColumn() { return assert_cast<ColumnArray &>(*nested); }

    const ColumnTuple & getNestedData() const { return assert_cast<const ColumnTuple &>(getNestedColumn().getData()); }
    ColumnTuple & getNestedData() { return assert_cast<ColumnTuple &>(getNestedColumn().getData()); }
};

}
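For context on the two new Column files, a sketch of the user-facing `Map` data type they back; the type was experimental at this point, so the enabling setting and the literal syntax below are assumptions about the intended usage rather than part of this patch:

``` sql
SET allow_experimental_map_type = 1;

CREATE TABLE table_map (m Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1': 1, 'key2': 10});
SELECT m['key1'] FROM table_map;
```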
@@ -24,6 +24,7 @@ SRCS(
    ColumnFixedString.cpp
    ColumnFunction.cpp
    ColumnLowCardinality.cpp
+    ColumnMap.cpp
    ColumnNullable.cpp
    ColumnString.cpp
    ColumnTuple.cpp
@@ -26,6 +26,7 @@
#define DISABLE_MREMAP 1
#endif
#include <common/mremap.h>
+#include <common/getPageSize.h>

#include <Common/MemoryTracker.h>
#include <Common/Exception.h>
@@ -59,7 +60,6 @@
  */
extern const size_t MMAP_THRESHOLD;

-static constexpr size_t MMAP_MIN_ALIGNMENT = 4096;
static constexpr size_t MALLOC_MIN_ALIGNMENT = 8;

namespace DB
@@ -194,10 +194,11 @@ private:
    void * allocNoTrack(size_t size, size_t alignment)
    {
        void * buf;
+        size_t mmap_min_alignment = ::getPageSize();

        if (size >= MMAP_THRESHOLD)
        {
-            if (alignment > MMAP_MIN_ALIGNMENT)
+            if (alignment > mmap_min_alignment)
                throw DB::Exception(fmt::format("Too large alignment {}: more than page size when allocating {}.",
                    ReadableSize(alignment), ReadableSize(size)), DB::ErrorCodes::BAD_ARGUMENTS);
@@ -83,10 +83,11 @@ private:
    /// Last contiguous MemoryChunk of memory.
    MemoryChunk * head;
    size_t size_in_bytes;
+    size_t page_size;

-    static size_t roundUpToPageSize(size_t s)
+    static size_t roundUpToPageSize(size_t s, size_t page_size)
    {
-        return (s + 4096 - 1) / 4096 * 4096;
+        return (s + page_size - 1) / page_size * page_size;
    }

    /// If MemoryChunks size is less than 'linear_growth_threshold', then use exponential growth, otherwise - linear growth
@@ -113,7 +114,7 @@ private:
        }

        assert(size_after_grow >= min_next_size);
-        return roundUpToPageSize(size_after_grow);
+        return roundUpToPageSize(size_after_grow, page_size);
    }

    /// Add next contiguous MemoryChunk of memory with size not less than specified.
@@ -130,6 +131,7 @@ public:
    Arena(size_t initial_size_ = 4096, size_t growth_factor_ = 2, size_t linear_growth_threshold_ = 128 * 1024 * 1024)
        : growth_factor(growth_factor_), linear_growth_threshold(linear_growth_threshold_),
        head(new MemoryChunk(initial_size_, nullptr)), size_in_bytes(head->size())
+        , page_size(static_cast<size_t>(::getPageSize()))
    {
    }
@@ -13,6 +13,8 @@
#include <boost/noncopyable.hpp>
#include <ext/scope_guard.h>

+#include <common/getPageSize.h>
+
#include <Common/Exception.h>
#include <Common/randomSeed.h>
#include <Common/formatReadable.h>
@@ -326,8 +328,6 @@ private:
        return (x + (rounding - 1)) / rounding * rounding;
    }

-    static constexpr size_t page_size = 4096;
-
    /// Sizes and addresses of allocated memory will be aligned to specified boundary.
    static constexpr size_t alignment = 16;
@@ -505,6 +505,7 @@ private:

    /// If nothing was found and total size of allocated chunks plus required size is lower than maximum,
    /// allocate a new chunk.
+    size_t page_size = static_cast<size_t>(::getPageSize());
    size_t required_chunk_size = std::max(min_chunk_size, roundUp(size, page_size));
    if (total_chunks_size + required_chunk_size <= max_total_size)
    {
@@ -529,6 +529,7 @@
    M(560, ZSTD_ENCODER_FAILED) \
    M(561, ZSTD_DECODER_FAILED) \
    M(562, TLD_LIST_NOT_FOUND) \
+    M(563, CANNOT_READ_MAP_FROM_TEXT) \
    \
    M(999, KEEPER_EXCEPTION) \
    M(1000, POCO_EXCEPTION) \
@@ -93,6 +93,22 @@ String FieldVisitorDump::operator() (const Tuple & x) const
    return wb.str();
}

+String FieldVisitorDump::operator() (const Map & x) const
+{
+    WriteBufferFromOwnString wb;
+
+    wb << "Map_(";
+    for (auto it = x.begin(); it != x.end(); ++it)
+    {
+        if (it != x.begin())
+            wb << ", ";
+        wb << applyVisitor(*this, *it);
+    }
+    wb << ')';
+
+    return wb.str();
+}

String FieldVisitorDump::operator() (const AggregateFunctionStateData & x) const
{
    WriteBufferFromOwnString wb;
@@ -176,6 +192,82 @@ String FieldVisitorToString::operator() (const Tuple & x) const
     return wb.str();
 }
 
+String FieldVisitorToString::operator() (const Map & x) const
+{
+    WriteBufferFromOwnString wb;
+
+    wb << '(';
+    for (auto it = x.begin(); it != x.end(); ++it)
+    {
+        if (it != x.begin())
+            wb << ", ";
+        wb << applyVisitor(*this, *it);
+    }
+    wb << ')';
+
+    return wb.str();
+}
+
+
+void FieldVisitorWriteBinary::operator() (const Null &, WriteBuffer &) const { }
+void FieldVisitorWriteBinary::operator() (const UInt64 & x, WriteBuffer & buf) const { DB::writeVarUInt(x, buf); }
+void FieldVisitorWriteBinary::operator() (const Int64 & x, WriteBuffer & buf) const { DB::writeVarInt(x, buf); }
+void FieldVisitorWriteBinary::operator() (const Float64 & x, WriteBuffer & buf) const { DB::writeFloatBinary(x, buf); }
+void FieldVisitorWriteBinary::operator() (const String & x, WriteBuffer & buf) const { DB::writeStringBinary(x, buf); }
+void FieldVisitorWriteBinary::operator() (const UInt128 & x, WriteBuffer & buf) const { DB::writeBinary(x, buf); }
+void FieldVisitorWriteBinary::operator() (const Int128 & x, WriteBuffer & buf) const { DB::writeVarInt(x, buf); }
+void FieldVisitorWriteBinary::operator() (const UInt256 & x, WriteBuffer & buf) const { DB::writeBinary(x, buf); }
+void FieldVisitorWriteBinary::operator() (const Int256 & x, WriteBuffer & buf) const { DB::writeBinary(x, buf); }
+void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const { DB::writeBinary(x.getValue(), buf); }
+void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const { DB::writeBinary(x.getValue(), buf); }
+void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const { DB::writeBinary(x.getValue(), buf); }
+void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const { DB::writeBinary(x.getValue(), buf); }
+void FieldVisitorWriteBinary::operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const
+{
+    DB::writeStringBinary(x.name, buf);
+    DB::writeStringBinary(x.data, buf);
+}
+
+void FieldVisitorWriteBinary::operator() (const Array & x, WriteBuffer & buf) const
+{
+    const size_t size = x.size();
+    DB::writeBinary(size, buf);
+
+    for (size_t i = 0; i < size; ++i)
+    {
+        const UInt8 type = x[i].getType();
+        DB::writeBinary(type, buf);
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, x[i]);
+    }
+}
+
+void FieldVisitorWriteBinary::operator() (const Tuple & x, WriteBuffer & buf) const
+{
+    const size_t size = x.size();
+    DB::writeBinary(size, buf);
+
+    for (size_t i = 0; i < size; ++i)
+    {
+        const UInt8 type = x[i].getType();
+        DB::writeBinary(type, buf);
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, x[i]);
+    }
+}
+
+
+void FieldVisitorWriteBinary::operator() (const Map & x, WriteBuffer & buf) const
+{
+    const size_t size = x.size();
+    DB::writeBinary(size, buf);
+
+    for (size_t i = 0; i < size; ++i)
+    {
+        const UInt8 type = x[i].getType();
+        writeBinary(type, buf);
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, x[i]);
+    }
+}
+
+
 FieldVisitorHash::FieldVisitorHash(SipHash & hash_) : hash(hash_) {}
 
@@ -238,6 +330,16 @@ void FieldVisitorHash::operator() (const Tuple & x) const
         applyVisitor(*this, elem);
 }
 
+void FieldVisitorHash::operator() (const Map & x) const
+{
+    UInt8 type = Field::Types::Map;
+    hash.update(type);
+    hash.update(x.size());
+
+    for (const auto & elem : x)
+        applyVisitor(*this, elem);
+}
+
 void FieldVisitorHash::operator() (const Array & x) const
 {
     UInt8 type = Field::Types::Array;
@@ -77,6 +77,7 @@ public:
     String operator() (const String & x) const;
     String operator() (const Array & x) const;
     String operator() (const Tuple & x) const;
+    String operator() (const Map & x) const;
     String operator() (const DecimalField<Decimal32> & x) const;
     String operator() (const DecimalField<Decimal64> & x) const;
     String operator() (const DecimalField<Decimal128> & x) const;
@@ -88,6 +89,30 @@ public:
 };
 
 
+class FieldVisitorWriteBinary
+{
+public:
+    void operator() (const Null & x, WriteBuffer & buf) const;
+    void operator() (const UInt64 & x, WriteBuffer & buf) const;
+    void operator() (const UInt128 & x, WriteBuffer & buf) const;
+    void operator() (const Int64 & x, WriteBuffer & buf) const;
+    void operator() (const Int128 & x, WriteBuffer & buf) const;
+    void operator() (const Float64 & x, WriteBuffer & buf) const;
+    void operator() (const String & x, WriteBuffer & buf) const;
+    void operator() (const Array & x, WriteBuffer & buf) const;
+    void operator() (const Tuple & x, WriteBuffer & buf) const;
+    void operator() (const Map & x, WriteBuffer & buf) const;
+    void operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const;
+    void operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const;
+    void operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const;
+    void operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const;
+    void operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const;
+
+    void operator() (const UInt256 & x, WriteBuffer & buf) const;
+    void operator() (const Int256 & x, WriteBuffer & buf) const;
+};
+
+
 /** Print readable and unique text dump of field type and value. */
 class FieldVisitorDump : public StaticVisitor<String>
 {
@@ -101,6 +126,7 @@ public:
     String operator() (const String & x) const;
     String operator() (const Array & x) const;
     String operator() (const Tuple & x) const;
+    String operator() (const Map & x) const;
     String operator() (const DecimalField<Decimal32> & x) const;
     String operator() (const DecimalField<Decimal64> & x) const;
     String operator() (const DecimalField<Decimal128> & x) const;
@@ -137,6 +163,11 @@ public:
         throw Exception("Cannot convert Tuple to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
     }
 
+    T operator() (const Map &) const
+    {
+        throw Exception("Cannot convert Map to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
+    }
+
     T operator() (const UInt64 & x) const { return T(x); }
     T operator() (const Int64 & x) const { return T(x); }
     T operator() (const Int128 & x) const { return T(x); }
@@ -226,6 +257,7 @@ public:
     void operator() (const String & x) const;
     void operator() (const Array & x) const;
     void operator() (const Tuple & x) const;
+    void operator() (const Map & x) const;
     void operator() (const DecimalField<Decimal32> & x) const;
     void operator() (const DecimalField<Decimal64> & x) const;
     void operator() (const DecimalField<Decimal128> & x) const;
@@ -268,6 +300,7 @@ public:
     bool operator() (String &) const { throw Exception("Cannot sum Strings", ErrorCodes::LOGICAL_ERROR); }
     bool operator() (Array &) const { throw Exception("Cannot sum Arrays", ErrorCodes::LOGICAL_ERROR); }
     bool operator() (Tuple &) const { throw Exception("Cannot sum Tuples", ErrorCodes::LOGICAL_ERROR); }
+    bool operator() (Map &) const { throw Exception("Cannot sum Maps", ErrorCodes::LOGICAL_ERROR); }
     bool operator() (UInt128 &) const { throw Exception("Cannot sum UUIDs", ErrorCodes::LOGICAL_ERROR); }
     bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot sum AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }
 
@@ -106,6 +106,11 @@ public:
         return aliases.count(name) || case_insensitive_aliases.count(name);
     }
 
+    bool hasNameOrAlias(const String & name) const
+    {
+        return getMap().count(name) || getCaseInsensitiveMap().count(name) || isAlias(name);
+    }
+
     virtual ~IFactoryWithAliases() override {}
 
 private:
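
The new hasNameOrAlias() bundles the three lookups a caller previously had to combine by hand: exact name, case-insensitive name, and registered alias. A rough standalone sketch of that lookup shape (the container layout here is an assumption, not the real IFactoryWithAliases internals):

    #include <algorithm>
    #include <cctype>
    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>

    struct Factory
    {
        std::set<std::string> names;                   /// canonical, case-sensitive
        std::set<std::string> case_insensitive_names;  /// stored lowercased
        std::map<std::string, std::string> aliases;    /// alias -> canonical name

        bool isAlias(const std::string & name) const { return aliases.count(name); }

        bool hasNameOrAlias(const std::string & name) const
        {
            std::string lower = name;
            std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
            return names.count(name) || case_insensitive_names.count(lower) || isAlias(name);
        }
    };

    int main()
    {
        Factory f;
        f.names.insert("sumIf");
        f.aliases["sum_if"] = "sumIf";
        std::printf("%d %d %d\n",
                    int(f.hasNameOrAlias("sumIf")),
                    int(f.hasNameOrAlias("sum_if")),
                    int(f.hasNameOrAlias("missing")));
    }
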
@@ -8,10 +8,11 @@
 
 #include "MemoryStatisticsOS.h"
 
+#include <common/logger_useful.h>
+#include <common/getPageSize.h>
 #include <Common/Exception.h>
 #include <IO/ReadBufferFromMemory.h>
 #include <IO/ReadHelpers.h>
-#include <common/logger_useful.h>
 
 
 namespace DB
@@ -26,7 +27,6 @@ namespace ErrorCodes
 }
 
 static constexpr auto filename = "/proc/self/statm";
-static constexpr size_t PAGE_SIZE = 4096;
 
 MemoryStatisticsOS::MemoryStatisticsOS()
 {
@@ -93,11 +93,12 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
     skipWhitespaceIfAny(in);
     readIntText(data.data_and_stack, in);
 
-    data.virt *= PAGE_SIZE;
-    data.resident *= PAGE_SIZE;
-    data.shared *= PAGE_SIZE;
-    data.code *= PAGE_SIZE;
-    data.data_and_stack *= PAGE_SIZE;
+    size_t page_size = static_cast<size_t>(::getPageSize());
+    data.virt *= page_size;
+    data.resident *= page_size;
+    data.shared *= page_size;
+    data.code *= page_size;
+    data.data_and_stack *= page_size;
 
     return data;
 }
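
The PAGE_SIZE constant disappears for the same reason as above: /proc/self/statm reports sizes in pages, and the page counts must be scaled by the page size of the running system. A Linux-only sketch of the scaling (plain stdio stands in for ClickHouse's ReadBuffer helpers):

    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        /// statm fields are page counts: virt, resident, shared, ...
        uint64_t virt = 0, resident = 0, shared = 0;
        FILE * f = std::fopen("/proc/self/statm", "r");
        if (!f)
            return 1;
        std::fscanf(f, "%lu %lu %lu", &virt, &resident, &shared);
        std::fclose(f);

        /// Scale by the runtime page size, not a hard-coded 4096.
        uint64_t page_size = static_cast<uint64_t>(sysconf(_SC_PAGESIZE));
        std::printf("virt=%lu resident=%lu shared=%lu (bytes)\n",
                    virt * page_size, resident * page_size, shared * page_size);
    }
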
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <common/getPageSize.h>
 #include <Common/Exception.h>
 #include <Common/StringUtils/StringUtils.h>
 #include <Common/UTF8Helpers.h>
@@ -37,7 +38,7 @@ struct StringSearcherBase
 {
 #ifdef __SSE2__
     static constexpr auto n = sizeof(__m128i);
-    const int page_size = getpagesize();
+    const int page_size = ::getPageSize();
 
     bool pageSafe(const void * const ptr) const
     {
@@ -2,11 +2,14 @@
 #include <Common/ThreadProfileEvents.h>
 #include <Common/QueryProfiler.h>
 #include <Common/ThreadStatus.h>
+#include <common/errnoToString.h>
 #include <Interpreters/OpenTelemetrySpanLog.h>
 
 #include <Poco/Logger.h>
 #include <common/getThreadId.h>
 
+#include <signal.h>
+
 
 namespace DB
 {
@@ -21,6 +24,11 @@ namespace ErrorCodes
 thread_local ThreadStatus * current_thread = nullptr;
 thread_local ThreadStatus * main_thread = nullptr;
 
+#if !defined(SANITIZER) && !defined(ARCADIA_BUILD)
+alignas(4096) static thread_local char alt_stack[4096];
+static thread_local bool has_alt_stack = false;
+#endif
+
 
 ThreadStatus::ThreadStatus()
     : thread_id{getThreadId()}
@@ -34,6 +42,46 @@ ThreadStatus::ThreadStatus()
 
     /// NOTE: It is important not to do any non-trivial actions (like updating ProfileEvents or logging) before ThreadStatus is created
     /// Otherwise it could lead to SIGSEGV due to current_thread dereferencing
+
+    /// Will set alternative signal stack to provide diagnostics for stack overflow errors.
+    /// If not already installed for current thread.
+    /// Sanitizer makes larger stack usage and also it's incompatible with alternative stack by default (it sets up and relies on its own).
+#if !defined(SANITIZER) && !defined(ARCADIA_BUILD)
+    if (!has_alt_stack)
+    {
+        /// Don't repeat tries even if not installed successfully.
+        has_alt_stack = true;
+
+        /// We have to call 'sigaltstack' before first 'sigaction'. (It does not work other way, for unknown reason).
+        stack_t altstack_description{};
+        altstack_description.ss_sp = alt_stack;
+        altstack_description.ss_flags = 0;
+        altstack_description.ss_size = sizeof(alt_stack);
+
+        if (0 != sigaltstack(&altstack_description, nullptr))
+        {
+            LOG_WARNING(log, "Cannot set alternative signal stack for thread, {}", errnoToString(errno));
+        }
+        else
+        {
+            /// Obtain existing sigaction and modify it by adding a flag.
+            struct sigaction action{};
+            if (0 != sigaction(SIGSEGV, nullptr, &action))
+            {
+                LOG_WARNING(log, "Cannot obtain previous signal action to set alternative signal stack for thread, {}", errnoToString(errno));
+            }
+            else if (!(action.sa_flags & SA_ONSTACK))
+            {
+                action.sa_flags |= SA_ONSTACK;
+
+                if (0 != sigaction(SIGSEGV, &action, nullptr))
+                {
+                    LOG_WARNING(log, "Cannot set action with alternative signal stack for thread, {}", errnoToString(errno));
+                }
+            }
+        }
+    }
+#endif
 }
 
 ThreadStatus::~ThreadStatus()
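
The constructor now installs an alternative signal stack once per thread, then re-installs the existing SIGSEGV action with SA_ONSTACK, so the fatal-error handler still has stack to run on after a stack overflow. A condensed, runnable sketch of the same sequence (perror stands in for LOG_WARNING):

    #include <signal.h>
    #include <cstdio>

    /// Per-thread scratch stack for the signal handler, as in the hunk above.
    alignas(4096) static thread_local char alt_stack[4096];

    int main()
    {
        /// 'sigaltstack' must be called before the first 'sigaction'.
        stack_t altstack_description{};
        altstack_description.ss_sp = alt_stack;
        altstack_description.ss_flags = 0;
        altstack_description.ss_size = sizeof(alt_stack);

        if (0 != sigaltstack(&altstack_description, nullptr))
        {
            std::perror("sigaltstack");
            return 1;
        }

        /// Read the current SIGSEGV action, add SA_ONSTACK, re-install.
        struct sigaction action{};
        if (0 != sigaction(SIGSEGV, nullptr, &action))
        {
            std::perror("sigaction(get)");
            return 1;
        }
        if (!(action.sa_flags & SA_ONSTACK))
        {
            action.sa_flags |= SA_ONSTACK;
            if (0 != sigaction(SIGSEGV, &action, nullptr))
                std::perror("sigaction(set)");
        }
    }
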
@@ -28,23 +28,28 @@ struct UInt128
     UInt64 low;
     UInt64 high;
 
+    /// TODO: Make this constexpr. Currently it is used in unions
+    /// and union cannot contain member with non trivial constructor
+    /// constructor must be non user provided but compiler cannot constexpr constructor
+    /// if members low and high are not initialized, if we default member initialize them
+    /// constructor becomes non trivial.
     UInt128() = default;
-    explicit UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) {}
+    explicit constexpr UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) { }
 
     /// We need Int128 to UInt128 conversion or AccurateComparison will call greaterOp<Int128, UInt64> instead of greaterOp<Int128, UInt128>
-    explicit UInt128(const Int128 rhs) : low(rhs), high(rhs >> 64) {}
-    explicit UInt128(const Int64 rhs) : low(rhs), high() {}
-    explicit UInt128(const Int32 rhs) : low(rhs), high() {}
-    explicit UInt128(const Int16 rhs) : low(rhs), high() {}
-    explicit UInt128(const Int8 rhs) : low(rhs), high() {}
-    explicit UInt128(const UInt8 rhs) : low(rhs), high() {}
-    explicit UInt128(const UInt16 rhs) : low(rhs), high() {}
-    explicit UInt128(const UInt32 rhs) : low(rhs), high() {}
-    explicit UInt128(const UInt64 rhs) : low(rhs), high() {}
-    explicit UInt128(const Float32 rhs) : low(rhs), high() {}
-    explicit UInt128(const Float64 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Int128 rhs) : low(rhs), high(rhs >> 64) {}
+    explicit constexpr UInt128(const Int64 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Int32 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Int16 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Int8 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const UInt8 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const UInt16 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const UInt32 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const UInt64 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Float32 rhs) : low(rhs), high() {}
+    explicit constexpr UInt128(const Float64 rhs) : low(rhs), high() {}
 
-    auto tuple() const { return std::tie(high, low); }
+    constexpr auto tuple() const { return std::tie(high, low); }
 
     String toHexString() const
     {
@@ -53,31 +58,31 @@ struct UInt128
         return res;
     }
 
-    bool inline operator== (const UInt128 rhs) const { return tuple() == rhs.tuple(); }
-    bool inline operator!= (const UInt128 rhs) const { return tuple() != rhs.tuple(); }
-    bool inline operator< (const UInt128 rhs) const { return tuple() < rhs.tuple(); }
-    bool inline operator<= (const UInt128 rhs) const { return tuple() <= rhs.tuple(); }
-    bool inline operator> (const UInt128 rhs) const { return tuple() > rhs.tuple(); }
-    bool inline operator>= (const UInt128 rhs) const { return tuple() >= rhs.tuple(); }
+    constexpr bool operator== (const UInt128 rhs) const { return tuple() == rhs.tuple(); }
+    constexpr bool operator!= (const UInt128 rhs) const { return tuple() != rhs.tuple(); }
+    constexpr bool operator< (const UInt128 rhs) const { return tuple() < rhs.tuple(); }
+    constexpr bool operator<= (const UInt128 rhs) const { return tuple() <= rhs.tuple(); }
+    constexpr bool operator> (const UInt128 rhs) const { return tuple() > rhs.tuple(); }
+    constexpr bool operator>= (const UInt128 rhs) const { return tuple() >= rhs.tuple(); }
 
-    bool inline operator == (const Int128 rhs) const { return *this == UInt128(rhs, rhs >> 64) && rhs >= 0; }
-    bool inline operator != (const Int128 rhs) const { return *this != UInt128(rhs, rhs >> 64) || rhs < 0; }
-    bool inline operator >= (const Int128 rhs) const { return *this >= UInt128(rhs, rhs >> 64) || rhs < 0; }
-    bool inline operator > (const Int128 rhs) const { return *this > UInt128(rhs, rhs >> 64) || rhs < 0; }
-    bool inline operator <= (const Int128 rhs) const { return *this <= UInt128(rhs, rhs >> 64) && rhs >= 0; }
-    bool inline operator < (const Int128 rhs) const { return *this < UInt128(rhs, rhs >> 64) && rhs >= 0; }
+    constexpr bool operator == (const Int128 rhs) const { return *this == UInt128(rhs, rhs >> 64) && rhs >= 0; }
+    constexpr bool operator != (const Int128 rhs) const { return *this != UInt128(rhs, rhs >> 64) || rhs < 0; }
+    constexpr bool operator >= (const Int128 rhs) const { return *this >= UInt128(rhs, rhs >> 64) || rhs < 0; }
+    constexpr bool operator > (const Int128 rhs) const { return *this > UInt128(rhs, rhs >> 64) || rhs < 0; }
+    constexpr bool operator <= (const Int128 rhs) const { return *this <= UInt128(rhs, rhs >> 64) && rhs >= 0; }
+    constexpr bool operator < (const Int128 rhs) const { return *this < UInt128(rhs, rhs >> 64) && rhs >= 0; }
 
-    bool inline operator > (const Int256 rhs) const { return (rhs < 0) || ((Int256(high) << 64) + low) > rhs; }
-    bool inline operator > (const UInt256 rhs) const { return ((UInt256(high) << 64) + low) > rhs; }
-    bool inline operator < (const Int256 rhs) const { return (rhs >= 0) && ((Int256(high) << 64) + low) < rhs; }
-    bool inline operator < (const UInt256 rhs) const { return ((UInt256(high) << 64) + low) < rhs; }
+    constexpr bool operator > (const Int256 rhs) const { return (rhs < 0) || ((Int256(high) << 64) + low) > rhs; }
+    constexpr bool operator > (const UInt256 rhs) const { return ((UInt256(high) << 64) + low) > rhs; }
+    constexpr bool operator < (const Int256 rhs) const { return (rhs >= 0) && ((Int256(high) << 64) + low) < rhs; }
+    constexpr bool operator < (const UInt256 rhs) const { return ((UInt256(high) << 64) + low) < rhs; }
 
-    template <typename T> bool inline operator== (const T rhs) const { return *this == UInt128(rhs); }
-    template <typename T> bool inline operator!= (const T rhs) const { return *this != UInt128(rhs); }
-    template <typename T> bool inline operator>= (const T rhs) const { return *this >= UInt128(rhs); }
-    template <typename T> bool inline operator> (const T rhs) const { return *this > UInt128(rhs); }
-    template <typename T> bool inline operator<= (const T rhs) const { return *this <= UInt128(rhs); }
-    template <typename T> bool inline operator< (const T rhs) const { return *this < UInt128(rhs); }
+    template <typename T> constexpr bool operator== (const T rhs) const { return *this == UInt128(rhs); }
+    template <typename T> constexpr bool operator!= (const T rhs) const { return *this != UInt128(rhs); }
+    template <typename T> constexpr bool operator>= (const T rhs) const { return *this >= UInt128(rhs); }
+    template <typename T> constexpr bool operator> (const T rhs) const { return *this > UInt128(rhs); }
+    template <typename T> constexpr bool operator<= (const T rhs) const { return *this <= UInt128(rhs); }
+    template <typename T> constexpr bool operator< (const T rhs) const { return *this < UInt128(rhs); }
 
     template <typename T> explicit operator T() const
     {
@@ -91,15 +96,15 @@ struct UInt128
 #pragma GCC diagnostic pop
 #endif
 
-    UInt128 & operator= (const UInt64 rhs) { low = rhs; high = 0; return *this; }
+    constexpr UInt128 & operator= (const UInt64 rhs) { low = rhs; high = 0; return *this; }
 };
 
-template <typename T> bool inline operator == (T a, const UInt128 b) { return b.operator==(a); }
-template <typename T> bool inline operator != (T a, const UInt128 b) { return b.operator!=(a); }
-template <typename T> bool inline operator >= (T a, const UInt128 b) { return b <= a; }
-template <typename T> bool inline operator > (T a, const UInt128 b) { return b < a; }
-template <typename T> bool inline operator <= (T a, const UInt128 b) { return b >= a; }
-template <typename T> bool inline operator < (T a, const UInt128 b) { return b > a; }
+template <typename T> constexpr bool operator == (T a, const UInt128 b) { return b.operator==(a); }
+template <typename T> constexpr bool operator != (T a, const UInt128 b) { return b.operator!=(a); }
+template <typename T> constexpr bool operator >= (T a, const UInt128 b) { return b <= a; }
+template <typename T> constexpr bool operator > (T a, const UInt128 b) { return b < a; }
+template <typename T> constexpr bool operator <= (T a, const UInt128 b) { return b >= a; }
+template <typename T> constexpr bool operator < (T a, const UInt128 b) { return b > a; }
 
 template <> inline constexpr bool IsNumber<UInt128> = true;
 template <> struct TypeName<UInt128> { static constexpr const char * get() { return "UInt128"; } };
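
With tuple() and the comparison operators marked constexpr, UInt128 comparisons become usable in constant expressions. A toy stand-in with the same tie-based (high, low) ordering shows what that buys:

    #include <tuple>

    struct UInt128
    {
        unsigned long long low = 0;
        unsigned long long high = 0;
        constexpr UInt128(unsigned long long low_, unsigned long long high_) : low(low_), high(high_) {}
        constexpr auto tuple() const { return std::tie(high, low); }
        constexpr bool operator< (const UInt128 rhs) const { return tuple() < rhs.tuple(); }
    };

    /// Evaluated entirely at compile time: the high word dominates the ordering.
    static_assert(UInt128{/*low*/ 5, /*high*/ 0} < UInt128{/*low*/ 0, /*high*/ 1}, "high word dominates");

    int main() {}
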
@@ -246,4 +251,42 @@ template <> struct hash<DB::UInt128>
     }
 };
 
+template <>
+class numeric_limits<DB::UInt128>
+{
+public:
+    static constexpr bool is_specialized = true;
+    static constexpr bool is_signed = ::is_signed<DB::UInt128>::value;
+    static constexpr bool is_integer = ::is_integer<DB::UInt128>::value;
+    static constexpr bool is_exact = true;
+    static constexpr bool has_infinity = false;
+    static constexpr bool has_quiet_NaN = false;
+    static constexpr bool has_signaling_NaN = false;
+    static constexpr std::float_denorm_style has_denorm = std::denorm_absent;
+    static constexpr bool has_denorm_loss = false;
+    static constexpr std::float_round_style round_style = std::round_toward_zero;
+    static constexpr bool is_iec559 = false;
+    static constexpr bool is_bounded = true;
+    static constexpr bool is_modulo = true;
+    static constexpr int digits = std::numeric_limits<UInt64>::digits * 2;
+    static constexpr int digits10 = digits * 0.30103 /*std::log10(2)*/;
+    static constexpr int max_digits10 = 0;
+    static constexpr int radix = 2;
+    static constexpr int min_exponent = 0;
+    static constexpr int min_exponent10 = 0;
+    static constexpr int max_exponent = 0;
+    static constexpr int max_exponent10 = 0;
+    static constexpr bool traps = true;
+    static constexpr bool tinyness_before = false;
+
+    static constexpr DB::UInt128 min() noexcept { return DB::UInt128(0, 0); }
+
+    static constexpr DB::UInt128 max() noexcept
+    {
+        return DB::UInt128(std::numeric_limits<UInt64>::max(), std::numeric_limits<UInt64>::max());
+    }
+
+    static constexpr DB::UInt128 lowest() noexcept { return min(); }
+};
+
 }
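
The numeric_limits specialization lets generic code that interrogates std::numeric_limits accept the wide type. A sketch of such a call site (a plain built-in stands in for DB::UInt128, which is not reproduced here):

    #include <limits>
    #include <cstdio>

    template <typename T>
    void printTraits()
    {
        static_assert(std::numeric_limits<T>::is_specialized, "needs a numeric_limits specialization");
        std::printf("digits=%d bounded=%d modulo=%d\n",
                    std::numeric_limits<T>::digits,
                    int(std::numeric_limits<T>::is_bounded),
                    int(std::numeric_limits<T>::is_modulo));
    }

    int main()
    {
        printTraits<unsigned long long>();  /// with the hunk above, printTraits<DB::UInt128>() compiles too
    }
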
@@ -798,6 +798,21 @@ void TestKeeperStorage::clearDeadWatches(int64_t session_id)
             if (watches_for_path.empty())
                 watches.erase(watch);
         }
+
+        auto list_watch = list_watches.find(watch_path);
+        if (list_watch != list_watches.end())
+        {
+            auto & list_watches_for_path = list_watch->second;
+            for (auto w_it = list_watches_for_path.begin(); w_it != list_watches_for_path.end();)
+            {
+                if (w_it->session_id == session_id)
+                    w_it = list_watches_for_path.erase(w_it);
+                else
+                    ++w_it;
+            }
+            if (list_watches_for_path.empty())
+                list_watches.erase(list_watch);
+        }
     }
     sessions_and_watchers.erase(watches_it);
 }
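
The added block applies the standard erase-while-iterating idiom to the list watches, mirroring what the loop above it already does for regular watches. A self-contained sketch of the idiom (simplified types; TestKeeperStorage itself is not reproduced):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Watch { long long session_id; };

    int main()
    {
        std::map<std::string, std::vector<Watch>> list_watches;
        list_watches["/path"] = {{1}, {2}, {1}};
        long long session_id = 1;

        auto list_watch = list_watches.find("/path");
        if (list_watch != list_watches.end())
        {
            auto & watches_for_path = list_watch->second;
            for (auto w_it = watches_for_path.begin(); w_it != watches_for_path.end();)
            {
                if (w_it->session_id == session_id)
                    w_it = watches_for_path.erase(w_it);  /// erase() returns the next valid iterator
                else
                    ++w_it;
            }
            if (watches_for_path.empty())
                list_watches.erase(list_watch);  /// drop the now-empty path entry
        }
        std::printf("watches left for /path: %zu\n",
                    list_watches.count("/path") ? list_watches["/path"].size() : size_t(0));
    }
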
@@ -5,6 +5,7 @@
 #cmakedefine01 USE_RE2_ST
 #cmakedefine01 USE_SSL
 #cmakedefine01 USE_HDFS
+#cmakedefine01 USE_INTERNAL_HDFS3_LIBRARY
 #cmakedefine01 USE_AWS_S3
 #cmakedefine01 USE_BROTLI
 #cmakedefine01 USE_UNWIND
@@ -7,7 +7,7 @@ ADDINCL (
     GLOBAL clickhouse/src
     contrib/libs/libcpuid
     contrib/libs/libunwind/include
-    GLOBAL contrib/restricted/ryu
+    GLOBAL contrib/restricted/dragonbox
 )
 
 PEERDIR(
@@ -18,7 +18,7 @@ PEERDIR(
     contrib/libs/openssl
     contrib/libs/poco/NetSSL_OpenSSL
     contrib/libs/re2
-    contrib/restricted/ryu
+    contrib/restricted/dragonbox
 )
 
 INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
@@ -6,7 +6,7 @@ ADDINCL (
     GLOBAL clickhouse/src
     contrib/libs/libcpuid
     contrib/libs/libunwind/include
-    GLOBAL contrib/restricted/ryu
+    GLOBAL contrib/restricted/dragonbox
 )
 
 PEERDIR(
@@ -17,7 +17,7 @@ PEERDIR(
     contrib/libs/openssl
     contrib/libs/poco/NetSSL_OpenSSL
     contrib/libs/re2
-    contrib/restricted/ryu
+    contrib/restricted/dragonbox
 )
 
 INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
@@ -515,11 +515,32 @@ inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result)
         return true;
     }
 
-    /// Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type.
-    if (isNaN(value) && std::is_floating_point_v<To>)
+    if constexpr (std::is_floating_point_v<From> && std::is_floating_point_v<To>)
     {
-        result = value;
-        return true;
+        /// Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type.
+        if (isNaN(value))
+        {
+            result = value;
+            return true;
+        }
+
+        if (value == std::numeric_limits<From>::infinity())
+        {
+            result = std::numeric_limits<To>::infinity();
+            return true;
+        }
+
+        if (value == -std::numeric_limits<From>::infinity())
+        {
+            result = -std::numeric_limits<To>::infinity();
+            return true;
+        }
+    }
+
+    if (accurate::greaterOp(value, std::numeric_limits<To>::max())
+        || accurate::greaterOp(std::numeric_limits<To>::lowest(), value))
+    {
+        return false;
     }
 
     result = static_cast<To>(value);
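
The rewritten branch handles the float specials separately: NaN and the infinities are legitimate values of every float type, so a float-to-float conversion must pass them through rather than reject them as out of range. A simplified sketch of the same logic (plain comparisons stand in for accurate::greaterOp):

    #include <cmath>
    #include <cstdio>
    #include <limits>
    #include <type_traits>

    template <typename From, typename To>
    bool convertFloat(From value, To & result)
    {
        if constexpr (std::is_floating_point_v<From> && std::is_floating_point_v<To>)
        {
            if (std::isnan(value)) { result = value; return true; }
            if (value == std::numeric_limits<From>::infinity()) { result = std::numeric_limits<To>::infinity(); return true; }
            if (value == -std::numeric_limits<From>::infinity()) { result = -std::numeric_limits<To>::infinity(); return true; }
        }
        if (value > std::numeric_limits<To>::max() || value < std::numeric_limits<To>::lowest())
            return false;  /// ordinary out-of-range case
        result = static_cast<To>(value);
        return true;
    }

    int main()
    {
        float out = 0;
        bool ok = convertFloat(std::numeric_limits<double>::infinity(), out);
        std::printf("ok=%d isinf=%d\n", int(ok), int(std::isinf(out)));
    }
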
@@ -206,23 +206,32 @@ inline typename DecimalType::NativeType getFractionalPart(const DecimalType & decimal
 }
 
 /// Decimal to integer/float conversion
-template <typename To, typename DecimalType>
-To convertTo(const DecimalType & decimal, size_t scale)
+template <typename To, typename DecimalType, typename ReturnType>
+ReturnType convertToImpl(const DecimalType & decimal, size_t scale, To & result)
 {
     using NativeT = typename DecimalType::NativeType;
+    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
 
     if constexpr (std::is_floating_point_v<To>)
     {
-        return static_cast<To>(decimal.value) / static_cast<To>(scaleMultiplier<NativeT>(scale));
+        result = static_cast<To>(decimal.value) / static_cast<To>(scaleMultiplier<NativeT>(scale));
     }
     else if constexpr (is_integer_v<To> && (sizeof(To) >= sizeof(NativeT)))
     {
         NativeT whole = getWholePart(decimal, scale);
 
         if constexpr (is_unsigned_v<To>)
+        {
             if (whole < 0)
-                throw Exception("Convert overflow", ErrorCodes::DECIMAL_OVERFLOW);
-        return static_cast<To>(whole);
+            {
+                if constexpr (throw_exception)
+                    throw Exception("Convert overflow", ErrorCodes::DECIMAL_OVERFLOW);
+                else
+                    return ReturnType(true);
+            }
+        }
+
+        result = static_cast<To>(whole);
     }
     else if constexpr (is_integer_v<To>)
     {
@@ -235,9 +244,34 @@ To convertTo(const DecimalType & decimal, size_t scale)
         static const constexpr CastTo max_to = std::numeric_limits<ToNativeT>::max();
 
         if (whole < min_to || whole > max_to)
-            throw Exception("Convert overflow", ErrorCodes::DECIMAL_OVERFLOW);
-        return static_cast<CastTo>(whole);
+        {
+            if constexpr (throw_exception)
+                throw Exception("Convert overflow", ErrorCodes::DECIMAL_OVERFLOW);
+            else
+                return ReturnType(true);
+        }
+
+        result = static_cast<CastTo>(whole);
     }
+
+    return ReturnType(true);
+}
+
+
+template <typename To, typename DecimalType>
+To convertTo(const DecimalType & decimal, size_t scale)
+{
+    To result;
+
+    convertToImpl<To, DecimalType, void>(decimal, scale, result);
+
+    return result;
+}
+
+template <typename To, typename DecimalType>
+bool tryConvertTo(const DecimalType & decimal, size_t scale, To & result)
+{
+    return convertToImpl<To, DecimalType, bool>(decimal, scale, result);
 }
 
 template <bool is_multiply, bool is_division, typename T, typename U, template <typename> typename DecimalType>
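
The refactoring above is the usual "ReturnType = void or bool" trick: one implementation backs both a throwing convertTo() and a non-throwing tryConvertTo(), with `if constexpr` choosing between `throw` and a boolean return. A self-contained sketch of the mechanism (with a deliberately simplified overflow check, and failure reported as `false`):

    #include <cstdio>
    #include <limits>
    #include <stdexcept>
    #include <type_traits>

    template <typename To, typename From, typename ReturnType>
    ReturnType convertToImpl(From value, To & result)
    {
        static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
        if (value > std::numeric_limits<To>::max())
        {
            if constexpr (throw_exception)
                throw std::overflow_error("Convert overflow");
            else
                return ReturnType(false);  /// report failure instead of throwing
        }
        result = static_cast<To>(value);
        return ReturnType(true);           /// with ReturnType = void this is a no-op cast
    }

    template <typename To, typename From>
    To convertTo(From value)
    {
        To result;
        convertToImpl<To, From, void>(value, result);  /// throws on overflow
        return result;
    }

    template <typename To, typename From>
    bool tryConvertTo(From value, To & result)
    {
        return convertToImpl<To, From, bool>(value, result);  /// false on overflow
    }

    int main()
    {
        int r = 0;
        std::printf("try: %d\n", int(tryConvertTo<int>(1LL << 40, r)));
        std::printf("ok: %d\n", convertTo<int>(42LL));
    }
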
@@ -17,6 +17,63 @@ namespace ErrorCodes
     extern const int DECIMAL_OVERFLOW;
 }
 
+inline Field getBinaryValue(UInt8 type, ReadBuffer & buf)
+{
+    switch (type)
+    {
+        case Field::Types::Null: {
+            return DB::Field();
+        }
+        case Field::Types::UInt64: {
+            UInt64 value;
+            DB::readVarUInt(value, buf);
+            return value;
+        }
+        case Field::Types::UInt128: {
+            UInt128 value;
+            DB::readBinary(value, buf);
+            return value;
+        }
+        case Field::Types::Int64: {
+            Int64 value;
+            DB::readVarInt(value, buf);
+            return value;
+        }
+        case Field::Types::Float64: {
+            Float64 value;
+            DB::readFloatBinary(value, buf);
+            return value;
+        }
+        case Field::Types::String: {
+            std::string value;
+            DB::readStringBinary(value, buf);
+            return value;
+        }
+        case Field::Types::Array: {
+            Array value;
+            DB::readBinary(value, buf);
+            return value;
+        }
+        case Field::Types::Tuple: {
+            Tuple value;
+            DB::readBinary(value, buf);
+            return value;
+        }
+        case Field::Types::Map: {
+            Map value;
+            DB::readBinary(value, buf);
+            return value;
+        }
+        case Field::Types::AggregateFunctionState: {
+            AggregateFunctionStateData value;
+            DB::readStringBinary(value.name, buf);
+            DB::readStringBinary(value.data, buf);
+            return value;
+        }
+    }
+    return DB::Field();
+}
+
 void readBinary(Array & x, ReadBuffer & buf)
 {
     size_t size;
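
getBinaryValue() centralizes the tag-to-decoder dispatch that was previously duplicated in every container reader (see the hunks below). A toy stand-in showing the shape of the idea, with std::variant instead of Field and fixed-width encoding instead of ReadBuffer:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <variant>
    #include <vector>

    using Field = std::variant<uint64_t, double>;
    enum : uint8_t { TagUInt64 = 1, TagFloat64 = 2 };

    /// Decode one tagged value from a byte stream.
    Field getBinaryValue(uint8_t tag, const uint8_t *& pos)
    {
        if (tag == TagUInt64) { uint64_t v; std::memcpy(&v, pos, 8); pos += 8; return v; }
        double v; std::memcpy(&v, pos, 8); pos += 8; return v;
    }

    int main()
    {
        uint8_t buf[18] = {};
        uint64_t u = 42;  std::memcpy(buf + 1, &u, 8);  buf[0] = TagUInt64;
        double d = 2.5;   std::memcpy(buf + 10, &d, 8); buf[9] = TagFloat64;

        /// Every container reader can now share this loop body.
        const uint8_t * pos = buf;
        std::vector<Field> fields;
        for (int i = 0; i < 2; ++i)
        {
            uint8_t tag = *pos++;
            fields.push_back(getBinaryValue(tag, pos));
        }
        std::printf("u=%llu d=%f\n",
                    (unsigned long long) std::get<uint64_t>(fields[0]),
                    std::get<double>(fields[1]));
    }
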
@@ -25,73 +82,7 @@ void readBinary(Array & x, ReadBuffer & buf)
     DB::readBinary(size, buf);
 
     for (size_t index = 0; index < size; ++index)
-    {
-        switch (type)
-        {
-            case Field::Types::Null:
-            {
-                x.push_back(DB::Field());
-                break;
-            }
-            case Field::Types::UInt64:
-            {
-                UInt64 value;
-                DB::readVarUInt(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::UInt128:
-            {
-                UInt128 value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Int64:
-            {
-                Int64 value;
-                DB::readVarInt(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Float64:
-            {
-                Float64 value;
-                DB::readFloatBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::String:
-            {
-                std::string value;
-                DB::readStringBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Array:
-            {
-                Array value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Tuple:
-            {
-                Tuple value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::AggregateFunctionState:
-            {
-                AggregateFunctionStateData value;
-                DB::readStringBinary(value.name, buf);
-                DB::readStringBinary(value.data, buf);
-                x.push_back(value);
-                break;
-            }
-        }
-    }
+        x.push_back(getBinaryValue(type, buf));
 }
 
 void writeBinary(const Array & x, WriteBuffer & buf)
@@ -104,53 +95,7 @@ void writeBinary(const Array & x, WriteBuffer & buf)
     DB::writeBinary(size, buf);
 
     for (const auto & elem : x)
-    {
-        switch (type)
-        {
-            case Field::Types::Null: break;
-            case Field::Types::UInt64:
-            {
-                DB::writeVarUInt(get<UInt64>(elem), buf);
-                break;
-            }
-            case Field::Types::UInt128:
-            {
-                DB::writeBinary(get<UInt128>(elem), buf);
-                break;
-            }
-            case Field::Types::Int64:
-            {
-                DB::writeVarInt(get<Int64>(elem), buf);
-                break;
-            }
-            case Field::Types::Float64:
-            {
-                DB::writeFloatBinary(get<Float64>(elem), buf);
-                break;
-            }
-            case Field::Types::String:
-            {
-                DB::writeStringBinary(get<std::string>(elem), buf);
-                break;
-            }
-            case Field::Types::Array:
-            {
-                DB::writeBinary(get<Array>(elem), buf);
-                break;
-            }
-            case Field::Types::Tuple:
-            {
-                DB::writeBinary(get<Tuple>(elem), buf);
-                break;
-            }
-            case Field::Types::AggregateFunctionState:
-            {
-                DB::writeStringBinary(elem.get<AggregateFunctionStateData>().name, buf);
-                DB::writeStringBinary(elem.get<AggregateFunctionStateData>().data, buf);
-                break;
-            }
-        }
-    }
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, elem);
 }
 
 void writeText(const Array & x, WriteBuffer & buf)
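
On the write side the hand-written switches collapse into a single generic dispatch through FieldVisitorWriteBinary. A toy stand-in for the pattern, with std::visit playing the role of Field::dispatch:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <variant>
    #include <vector>

    using Field = std::variant<uint64_t, double, std::string>;

    struct FieldVisitorWriteBinary
    {
        /// One overload per type replaces one switch case per call site.
        void operator() (uint64_t x) const { std::printf("u64:%llu ", (unsigned long long) x); }
        void operator() (double x) const { std::printf("f64:%g ", x); }
        void operator() (const std::string & x) const { std::printf("str:%s ", x.c_str()); }
    };

    int main()
    {
        std::vector<Field> tuple = {uint64_t{1}, 2.5, std::string{"abc"}};
        for (const auto & elem : tuple)
            std::visit([] (const auto & value) { FieldVisitorWriteBinary()(value); }, elem);
        std::printf("\n");
    }
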
@@ -168,93 +113,7 @@ void readBinary(Tuple & x, ReadBuffer & buf)
     {
         UInt8 type;
         DB::readBinary(type, buf);
-        switch (type)
-        {
-            case Field::Types::Null:
-            {
-                x.push_back(DB::Field());
-                break;
-            }
-            case Field::Types::UInt64:
-            {
-                UInt64 value;
-                DB::readVarUInt(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::UInt128:
-            {
-                UInt128 value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Int64:
-            {
-                Int64 value;
-                DB::readVarInt(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Int128:
-            {
-                Int64 value;
-                DB::readVarInt(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Float64:
-            {
-                Float64 value;
-                DB::readFloatBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::String:
-            {
-                std::string value;
-                DB::readStringBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::UInt256:
-            {
-                UInt256 value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Int256:
-            {
-                Int256 value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Array:
-            {
-                Array value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::Tuple:
-            {
-                Tuple value;
-                DB::readBinary(value, buf);
-                x.push_back(value);
-                break;
-            }
-            case Field::Types::AggregateFunctionState:
-            {
-                AggregateFunctionStateData value;
-                DB::readStringBinary(value.name, buf);
-                DB::readStringBinary(value.data, buf);
-                x.push_back(value);
-                break;
-            }
-        }
+        x.push_back(getBinaryValue(type, buf));
     }
 }
@@ -267,67 +126,7 @@ void writeBinary(const Tuple & x, WriteBuffer & buf)
     {
         const UInt8 type = elem.getType();
         DB::writeBinary(type, buf);
-        switch (type)
-        {
-            case Field::Types::Null: break;
-            case Field::Types::UInt64:
-            {
-                DB::writeVarUInt(get<UInt64>(elem), buf);
-                break;
-            }
-            case Field::Types::UInt128:
-            {
-                DB::writeBinary(get<UInt128>(elem), buf);
-                break;
-            }
-            case Field::Types::Int64:
-            {
-                DB::writeVarInt(get<Int64>(elem), buf);
-                break;
-            }
-            case Field::Types::Int128:
-            {
-                DB::writeVarInt(get<Int64>(elem), buf);
-                break;
-            }
-            case Field::Types::Float64:
-            {
-                DB::writeFloatBinary(get<Float64>(elem), buf);
-                break;
-            }
-            case Field::Types::String:
-            {
-                DB::writeStringBinary(get<std::string>(elem), buf);
-                break;
-            }
-            case Field::Types::UInt256:
-            {
-                DB::writeBinary(get<UInt256>(elem), buf);
-                break;
-            }
-            case Field::Types::Int256:
-            {
-                DB::writeBinary(get<Int256>(elem), buf);
-                break;
-            }
-            case Field::Types::Array:
-            {
-                DB::writeBinary(get<Array>(elem), buf);
-                break;
-            }
-            case Field::Types::Tuple:
-            {
-                DB::writeBinary(get<Tuple>(elem), buf);
-                break;
-            }
-            case Field::Types::AggregateFunctionState:
-            {
-                DB::writeStringBinary(elem.get<AggregateFunctionStateData>().name, buf);
-                DB::writeStringBinary(elem.get<AggregateFunctionStateData>().data, buf);
-                break;
-            }
-        }
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, elem);
     }
 }
@@ -336,6 +135,37 @@ void writeText(const Tuple & x, WriteBuffer & buf)
     writeFieldText(DB::Field(x), buf);
 }
 
+void readBinary(Map & x, ReadBuffer & buf)
+{
+    size_t size;
+    DB::readBinary(size, buf);
+
+    for (size_t index = 0; index < size; ++index)
+    {
+        UInt8 type;
+        DB::readBinary(type, buf);
+        x.push_back(getBinaryValue(type, buf));
+    }
+}
+
+void writeBinary(const Map & x, WriteBuffer & buf)
+{
+    const size_t size = x.size();
+    DB::writeBinary(size, buf);
+
+    for (const auto & elem : x)
+    {
+        const UInt8 type = elem.getType();
+        DB::writeBinary(type, buf);
+        Field::dispatch([&buf] (const auto & value) { DB::FieldVisitorWriteBinary()(value, buf); }, elem);
+    }
+}
+
+void writeText(const Map & x, WriteBuffer & buf)
+{
+    writeFieldText(DB::Field(x), buf);
+}
+
 template <typename T>
 void readQuoted(DecimalField<T> & x, ReadBuffer & buf)
 {
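
The Map field introduced by this commit is laid out exactly like an Array whose elements are (key, value) tuples, which is why readBinary/writeBinary above can reuse getBinaryValue() and the write visitor unchanged. A toy illustration of that layout, with std::pair standing in for the tuple elements:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    /// [(key1, value1), (key2, value2), ...] -- the structure documented in Field.h below.
    using Map = std::vector<std::pair<std::string, uint64_t>>;

    int main()
    {
        Map map;
        map.push_back({"a", 1});
        map.push_back({"b", 2});

        /// Serialization walks the pairs in order, like writeBinary(const Map &).
        for (const auto & [key, value] : map)
            std::printf("(%s, %llu) ", key.c_str(), (unsigned long long) value);
        std::printf("\n");
    }
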
@@ -530,6 +360,30 @@ Field Field::restoreFromDump(const std::string_view & dump_)
         return tuple;
     }
 
+    prefix = std::string_view{"Map_("};
+    if (dump.starts_with(prefix))
+    {
+        std::string_view tail = dump.substr(prefix.length());
+        trimLeft(tail);
+        Map map;
+        while (tail != ")")
+        {
+            size_t separator = tail.find_first_of(",)");
+            if (separator == std::string_view::npos)
+                show_error();
+            bool comma = (tail[separator] == ',');
+            std::string_view element = tail.substr(0, separator);
+            tail.remove_prefix(separator);
+            if (comma)
+                tail.remove_prefix(1);
+            trimLeft(tail);
+            if (!comma && tail != ")")
+                show_error();
+            map.push_back(Field::restoreFromDump(element));
+        }
+        return map;
+    }
+
     prefix = std::string_view{"AggregateFunctionState_("};
     if (dump.starts_with(prefix))
     {
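
The dump parser above strips the "Map_(" prefix and then splits on ',' until the closing ')'. A self-contained sketch of the same loop, with error handling reduced to a bool and elements kept as raw string views (nested elements containing commas are out of scope for this sketch):

    #include <cstdio>
    #include <string_view>
    #include <vector>

    bool parseMapDump(std::string_view dump, std::vector<std::string_view> & elements)
    {
        std::string_view prefix = "Map_(";
        if (dump.substr(0, prefix.size()) != prefix)
            return false;
        std::string_view tail = dump.substr(prefix.size());
        while (tail != ")")
        {
            size_t separator = tail.find_first_of(",)");
            if (separator == std::string_view::npos)
                return false;                  /// unterminated dump
            elements.push_back(tail.substr(0, separator));
            bool comma = (tail[separator] == ',');
            tail.remove_prefix(separator);
            if (comma)
                tail.remove_prefix(1);         /// skip ',' and keep going
            while (!tail.empty() && tail.front() == ' ')
                tail.remove_prefix(1);
            if (!comma && tail != ")")
                return false;                  /// junk after the last element
        }
        return true;
    }

    int main()
    {
        std::vector<std::string_view> elems;
        bool ok = parseMapDump("Map_(k1, v1, k2, v2)", elems);
        std::printf("ok=%d elements=%zu\n", int(ok), elems.size());
    }
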
@ -51,6 +51,9 @@ struct X : public FieldVector \
|
|||||||
DEFINE_FIELD_VECTOR(Array);
|
DEFINE_FIELD_VECTOR(Array);
|
||||||
DEFINE_FIELD_VECTOR(Tuple);
|
DEFINE_FIELD_VECTOR(Tuple);
|
||||||
|
|
||||||
|
/// An array with the following structure: [(key1, value1), (key2, value2), ...]
|
||||||
|
DEFINE_FIELD_VECTOR(Map);
|
||||||
|
|
||||||
#undef DEFINE_FIELD_VECTOR
|
#undef DEFINE_FIELD_VECTOR
|
||||||
|
|
||||||
struct AggregateFunctionStateData
|
struct AggregateFunctionStateData
|
||||||
@ -206,6 +209,7 @@ template <> struct NearestFieldTypeImpl<std::string_view> { using Type = String;
|
|||||||
template <> struct NearestFieldTypeImpl<String> { using Type = String; };
|
template <> struct NearestFieldTypeImpl<String> { using Type = String; };
|
||||||
template <> struct NearestFieldTypeImpl<Array> { using Type = Array; };
|
template <> struct NearestFieldTypeImpl<Array> { using Type = Array; };
|
||||||
template <> struct NearestFieldTypeImpl<Tuple> { using Type = Tuple; };
|
template <> struct NearestFieldTypeImpl<Tuple> { using Type = Tuple; };
|
||||||
|
template <> struct NearestFieldTypeImpl<Map> { using Type = Map; };
|
||||||
template <> struct NearestFieldTypeImpl<bool> { using Type = UInt64; };
|
template <> struct NearestFieldTypeImpl<bool> { using Type = UInt64; };
|
||||||
template <> struct NearestFieldTypeImpl<Null> { using Type = Null; };
|
template <> struct NearestFieldTypeImpl<Null> { using Type = Null; };
|
||||||
|
|
||||||
@ -259,6 +263,7 @@ public:
|
|||||||
Decimal256 = 23,
|
Decimal256 = 23,
|
||||||
UInt256 = 24,
|
UInt256 = 24,
|
||||||
Int256 = 25,
|
Int256 = 25,
|
||||||
|
Map = 26,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const int MIN_NON_POD = 16;
|
static const int MIN_NON_POD = 16;
|
||||||
@ -276,6 +281,7 @@ public:
|
|||||||
case String: return "String";
|
case String: return "String";
|
||||||
case Array: return "Array";
|
case Array: return "Array";
|
||||||
case Tuple: return "Tuple";
|
case Tuple: return "Tuple";
|
||||||
|
case Map: return "Map";
|
||||||
case Decimal32: return "Decimal32";
|
case Decimal32: return "Decimal32";
|
||||||
case Decimal64: return "Decimal64";
|
case Decimal64: return "Decimal64";
|
||||||
case Decimal128: return "Decimal128";
|
case Decimal128: return "Decimal128";
|
||||||
@ -464,6 +470,7 @@ public:
|
|||||||
case Types::String: return get<String>() < rhs.get<String>();
|
case Types::String: return get<String>() < rhs.get<String>();
|
||||||
case Types::Array: return get<Array>() < rhs.get<Array>();
|
case Types::Array: return get<Array>() < rhs.get<Array>();
|
||||||
case Types::Tuple: return get<Tuple>() < rhs.get<Tuple>();
|
case Types::Tuple: return get<Tuple>() < rhs.get<Tuple>();
|
||||||
|
case Types::Map: return get<Map>() < rhs.get<Map>();
|
||||||
case Types::Decimal32: return get<DecimalField<Decimal32>>() < rhs.get<DecimalField<Decimal32>>();
|
case Types::Decimal32: return get<DecimalField<Decimal32>>() < rhs.get<DecimalField<Decimal32>>();
|
||||||
case Types::Decimal64: return get<DecimalField<Decimal64>>() < rhs.get<DecimalField<Decimal64>>();
|
case Types::Decimal64: return get<DecimalField<Decimal64>>() < rhs.get<DecimalField<Decimal64>>();
|
||||||
case Types::Decimal128: return get<DecimalField<Decimal128>>() < rhs.get<DecimalField<Decimal128>>();
|
case Types::Decimal128: return get<DecimalField<Decimal128>>() < rhs.get<DecimalField<Decimal128>>();
|
||||||
@ -499,6 +506,7 @@ public:
|
|||||||
case Types::String: return get<String>() <= rhs.get<String>();
|
case Types::String: return get<String>() <= rhs.get<String>();
|
||||||
case Types::Array: return get<Array>() <= rhs.get<Array>();
|
case Types::Array: return get<Array>() <= rhs.get<Array>();
|
||||||
case Types::Tuple: return get<Tuple>() <= rhs.get<Tuple>();
|
case Types::Tuple: return get<Tuple>() <= rhs.get<Tuple>();
|
||||||
|
case Types::Map: return get<Map>() <= rhs.get<Map>();
|
||||||
case Types::Decimal32: return get<DecimalField<Decimal32>>() <= rhs.get<DecimalField<Decimal32>>();
|
case Types::Decimal32: return get<DecimalField<Decimal32>>() <= rhs.get<DecimalField<Decimal32>>();
|
||||||
case Types::Decimal64: return get<DecimalField<Decimal64>>() <= rhs.get<DecimalField<Decimal64>>();
|
case Types::Decimal64: return get<DecimalField<Decimal64>>() <= rhs.get<DecimalField<Decimal64>>();
|
||||||
case Types::Decimal128: return get<DecimalField<Decimal128>>() <= rhs.get<DecimalField<Decimal128>>();
|
case Types::Decimal128: return get<DecimalField<Decimal128>>() <= rhs.get<DecimalField<Decimal128>>();
|
||||||
@ -536,6 +544,7 @@ public:
|
|||||||
case Types::String: return get<String>() == rhs.get<String>();
|
case Types::String: return get<String>() == rhs.get<String>();
|
||||||
case Types::Array: return get<Array>() == rhs.get<Array>();
|
case Types::Array: return get<Array>() == rhs.get<Array>();
|
||||||
case Types::Tuple: return get<Tuple>() == rhs.get<Tuple>();
|
case Types::Tuple: return get<Tuple>() == rhs.get<Tuple>();
|
||||||
|
case Types::Map: return get<Map>() == rhs.get<Map>();
|
||||||
case Types::UInt128: return get<UInt128>() == rhs.get<UInt128>();
|
case Types::UInt128: return get<UInt128>() == rhs.get<UInt128>();
|
||||||
case Types::Int128: return get<Int128>() == rhs.get<Int128>();
|
case Types::Int128: return get<Int128>() == rhs.get<Int128>();
|
||||||
case Types::Decimal32: return get<DecimalField<Decimal32>>() == rhs.get<DecimalField<Decimal32>>();
|
case Types::Decimal32: return get<DecimalField<Decimal32>>() == rhs.get<DecimalField<Decimal32>>();
|
||||||
@ -575,6 +584,7 @@ public:
|
|||||||
case Types::String: return f(field.template get<String>());
|
case Types::String: return f(field.template get<String>());
|
||||||
case Types::Array: return f(field.template get<Array>());
|
case Types::Array: return f(field.template get<Array>());
|
||||||
case Types::Tuple: return f(field.template get<Tuple>());
|
case Types::Tuple: return f(field.template get<Tuple>());
|
||||||
|
case Types::Map: return f(field.template get<Map>());
|
||||||
case Types::Decimal32: return f(field.template get<DecimalField<Decimal32>>());
|
case Types::Decimal32: return f(field.template get<DecimalField<Decimal32>>());
|
||||||
case Types::Decimal64: return f(field.template get<DecimalField<Decimal64>>());
|
case Types::Decimal64: return f(field.template get<DecimalField<Decimal64>>());
|
||||||
case Types::Decimal128: return f(field.template get<DecimalField<Decimal128>>());
|
case Types::Decimal128: return f(field.template get<DecimalField<Decimal128>>());
|
||||||
@ -600,7 +610,7 @@ public:
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
std::aligned_union_t<DBMS_MIN_FIELD_SIZE - sizeof(Types::Which),
|
std::aligned_union_t<DBMS_MIN_FIELD_SIZE - sizeof(Types::Which),
|
||||||
Null, UInt64, UInt128, Int64, Int128, Float64, String, Array, Tuple,
|
Null, UInt64, UInt128, Int64, Int128, Float64, String, Array, Tuple, Map,
|
||||||
DecimalField<Decimal32>, DecimalField<Decimal64>, DecimalField<Decimal128>, DecimalField<Decimal256>,
|
DecimalField<Decimal32>, DecimalField<Decimal64>, DecimalField<Decimal128>, DecimalField<Decimal256>,
|
||||||
AggregateFunctionStateData,
|
AggregateFunctionStateData,
|
||||||
UInt256, Int256
|
UInt256, Int256
|
||||||
@@ -699,6 +709,9 @@ private:
             case Types::Tuple:
                 destroy<Tuple>();
                 break;
+            case Types::Map:
+                destroy<Map>();
+                break;
             case Types::AggregateFunctionState:
                 destroy<AggregateFunctionStateData>();
                 break;
@@ -729,6 +742,7 @@ template <> struct Field::TypeToEnum<Float64> { static const Types::Which value
 template <> struct Field::TypeToEnum<String> { static const Types::Which value = Types::String; };
 template <> struct Field::TypeToEnum<Array> { static const Types::Which value = Types::Array; };
 template <> struct Field::TypeToEnum<Tuple> { static const Types::Which value = Types::Tuple; };
+template <> struct Field::TypeToEnum<Map> { static const Types::Which value = Types::Map; };
 template <> struct Field::TypeToEnum<DecimalField<Decimal32>>{ static const Types::Which value = Types::Decimal32; };
 template <> struct Field::TypeToEnum<DecimalField<Decimal64>>{ static const Types::Which value = Types::Decimal64; };
 template <> struct Field::TypeToEnum<DecimalField<Decimal128>>{ static const Types::Which value = Types::Decimal128; };
@@ -747,6 +761,7 @@ template <> struct Field::EnumToType<Field::Types::Float64> { using Type = Float
 template <> struct Field::EnumToType<Field::Types::String> { using Type = String; };
 template <> struct Field::EnumToType<Field::Types::Array> { using Type = Array; };
 template <> struct Field::EnumToType<Field::Types::Tuple> { using Type = Tuple; };
+template <> struct Field::EnumToType<Field::Types::Map> { using Type = Map; };
 template <> struct Field::EnumToType<Field::Types::Decimal32> { using Type = DecimalField<Decimal32>; };
 template <> struct Field::EnumToType<Field::Types::Decimal64> { using Type = DecimalField<Decimal64>; };
 template <> struct Field::EnumToType<Field::Types::Decimal128> { using Type = DecimalField<Decimal128>; };
@@ -814,6 +829,7 @@ T safeGet(Field & field)
 
 template <> struct TypeName<Array> { static std::string get() { return "Array"; } };
 template <> struct TypeName<Tuple> { static std::string get() { return "Tuple"; } };
+template <> struct TypeName<Map> { static std::string get() { return "Map"; } };
 template <> struct TypeName<AggregateFunctionStateData> { static std::string get() { return "AggregateFunctionState"; } };
 
 template <typename T>
@@ -900,6 +916,12 @@ void writeBinary(const Tuple & x, WriteBuffer & buf);
 
 void writeText(const Tuple & x, WriteBuffer & buf);
 
+void readBinary(Map & x, ReadBuffer & buf);
+[[noreturn]] inline void readText(Map &, ReadBuffer &) { throw Exception("Cannot read Map.", ErrorCodes::NOT_IMPLEMENTED); }
+[[noreturn]] inline void readQuoted(Map &, ReadBuffer &) { throw Exception("Cannot read Map.", ErrorCodes::NOT_IMPLEMENTED); }
+void writeBinary(const Map & x, WriteBuffer & buf);
+void writeText(const Map & x, WriteBuffer & buf);
+[[noreturn]] inline void writeQuoted(const Map &, WriteBuffer &) { throw Exception("Cannot write Map quoted.", ErrorCodes::NOT_IMPLEMENTED); }
 
 __attribute__ ((noreturn)) inline void writeText(const AggregateFunctionStateData &, WriteBuffer &)
 {
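The Field-level Map added above is a vector of Tuple(key, value) elements, which the read/write helpers declared in this hunk operate on. Below is a minimal standalone sketch of that traversal, with plain std types standing in for Field, Tuple, and Map (the real ClickHouse classes are richer):

#include <string>
#include <tuple>
#include <vector>

using KV = std::tuple<std::string, int>;  // stands in for Tuple(key, value)
using MapField = std::vector<KV>;         // stands in for the new Map field

int sumValues(const MapField & m)
{
    int sum = 0;
    for (const auto & kv : m)
        sum += std::get<1>(kv);  // element 1 is the value part of the pair
    return sum;
}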
@@ -16,10 +16,8 @@ namespace ErrorCodes
     extern const int UNKNOWN_ELEMENT_IN_CONFIG;
 }
 
-
 IMPLEMENT_SETTINGS_TRAITS(SettingsTraits, LIST_OF_SETTINGS)
 
-
 /** Set the settings from the profile (in the server configuration, many settings can be listed in one profile).
   * The profile can also be set using the `set` functions, like the `profile` setting.
   */
@@ -240,6 +240,8 @@ class IColumn;
       * Almost all limits apply to each stream individually. \
       */ \
     \
+    M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
+    M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \
     M(UInt64, max_rows_to_read, 0, "Limit on read rows from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \
     M(UInt64, max_bytes_to_read, 0, "Limit on read bytes (after decompression) from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \
     M(OverflowMode, read_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
@@ -402,6 +404,8 @@ class IColumn;
     M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
     M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
     M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
+    M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
+    M(Bool, allow_experimental_map_type, false, "Allow data type Map", 0) \
     \
     M(Bool, use_antlr_parser, false, "Parse incoming queries using ANTLR-generated parser", 0) \
     \
@@ -30,7 +30,6 @@ struct SortCursorImpl
     ColumnRawPtrs all_columns;
     SortDescription desc;
     size_t sort_columns_size = 0;
-    size_t pos = 0;
     size_t rows = 0;
 
     /** Determines order if comparing columns are equal.
@@ -49,15 +48,20 @@ struct SortCursorImpl
     /** Is there at least one column with Collator. */
     bool has_collation = false;
 
+    /** We could use SortCursorImpl in case when columns aren't sorted
+      * but we have their sorted permutation
+      */
+    IColumn::Permutation * permutation = nullptr;
+
     SortCursorImpl() {}
 
-    SortCursorImpl(const Block & block, const SortDescription & desc_, size_t order_ = 0)
+    SortCursorImpl(const Block & block, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr)
         : desc(desc_), sort_columns_size(desc.size()), order(order_), need_collation(desc.size())
     {
-        reset(block);
+        reset(block, perm);
     }
 
-    SortCursorImpl(const Columns & columns, const SortDescription & desc_, size_t order_ = 0)
+    SortCursorImpl(const Columns & columns, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr)
         : desc(desc_), sort_columns_size(desc.size()), order(order_), need_collation(desc.size())
     {
         for (auto & column_desc : desc)
@@ -66,19 +70,19 @@ struct SortCursorImpl
                 throw Exception("SortDescription should contain column position if SortCursor was used without header.",
                     ErrorCodes::LOGICAL_ERROR);
         }
-        reset(columns, {});
+        reset(columns, {}, perm);
     }
 
     bool empty() const { return rows == 0; }
 
     /// Set the cursor to the beginning of the new block.
-    void reset(const Block & block)
+    void reset(const Block & block, IColumn::Permutation * perm = nullptr)
     {
-        reset(block.getColumns(), block);
+        reset(block.getColumns(), block, perm);
     }
 
     /// Set the cursor to the beginning of the new block.
-    void reset(const Columns & columns, const Block & block)
+    void reset(const Columns & columns, const Block & block, IColumn::Permutation * perm = nullptr)
     {
         all_columns.clear();
         sort_columns.clear();
@@ -96,18 +100,33 @@ struct SortCursorImpl
                 : column_desc.column_number;
             sort_columns.push_back(columns[column_number].get());
 
-            need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported(); /// TODO Nullable(String)
+            need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported();
             has_collation |= need_collation[j];
         }
 
         pos = 0;
         rows = all_columns[0]->size();
+        permutation = perm;
     }
 
+    size_t getRow() const
+    {
+        if (permutation)
+            return (*permutation)[pos];
+        return pos;
+    }
+
+    /// We need a possibility to change pos (see MergeJoin).
+    size_t & getPosRef() { return pos; }
+
     bool isFirst() const { return pos == 0; }
     bool isLast() const { return pos + 1 >= rows; }
     bool isValid() const { return pos < rows; }
     void next() { ++pos; }
+
+    /// Prevent using pos instead of getRow()
+private:
+    size_t pos;
 };
 
 using SortCursorImpls = std::vector<SortCursorImpl>;
@@ -127,7 +146,7 @@ struct SortCursorHelper
 
     bool ALWAYS_INLINE greater(const SortCursorHelper & rhs) const
    {
-        return derived().greaterAt(rhs.derived(), impl->pos, rhs.impl->pos);
+        return derived().greaterAt(rhs.derived(), impl->getRow(), rhs.impl->getRow());
    }
 
     /// Inverted so that the priority queue elements are removed in ascending order.
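A minimal sketch of the indirection that getRow() introduces above, with a plain std container in place of IColumn::Permutation: when a sorted permutation is supplied, the cursor's logical position maps to a physical row through it; otherwise rows are read in storage order.

#include <cstddef>
#include <vector>

size_t physicalRow(const std::vector<size_t> * permutation, size_t pos)
{
    // Mirrors SortCursorImpl::getRow(): indirect through the permutation if present.
    return permutation ? (*permutation)[pos] : pos;
}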
@@ -56,6 +56,7 @@ enum class TypeIndex
     Function,
     AggregateFunction,
     LowCardinality,
+    Map,
 };
 #if !__clang__
 #pragma GCC diagnostic pop
@@ -267,6 +268,7 @@ inline constexpr const char * getTypeName(TypeIndex idx)
         case TypeIndex::Function: return "Function";
         case TypeIndex::AggregateFunction: return "AggregateFunction";
         case TypeIndex::LowCardinality: return "LowCardinality";
+        case TypeIndex::Map: return "Map";
     }
 
     __builtin_unreachable();
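The getTypeName() change above extends an exhaustive switch over the enum: every enumerator returns, so control can never fall out of the switch, which is what the trailing __builtin_unreachable() asserts. A toy illustration of the same idiom:

enum class Color { Red, Green };

constexpr const char * colorName(Color c)
{
    switch (c)
    {
        case Color::Red: return "Red";
        case Color::Green: return "Green";
    }
    // Reached only if a new enumerator is added without a case above.
    __builtin_unreachable();
}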
@@ -222,7 +222,7 @@ void MergingSortedBlockInputStream::merge(MutableColumns & merged_columns, TSort
 //            std::cerr << "total_merged_rows: " << total_merged_rows << ", merged_rows: " << merged_rows << "\n";
 //            std::cerr << "Inserting row\n";
             for (size_t i = 0; i < num_columns; ++i)
-                merged_columns[i]->insertFrom(*current->all_columns[i], current->pos);
+                merged_columns[i]->insertFrom(*current->all_columns[i], current->getRow());
 
             if (out_row_sources_buf)
             {
@@ -3,6 +3,7 @@
 #include <Client/ConnectionPool.h>
 #include <Client/MultiplexedConnections.h>
 #include <Storages/IStorage_fwd.h>
+#include <Interpreters/Context.h>
 #include <Interpreters/StorageID.h>
 
 namespace DB
@@ -93,7 +94,7 @@ private:
 
     const String query;
     String query_id = "";
-    const Context & context;
+    Context context;
 
     ProgressCallback progress_callback;
     ProfileInfoCallback profile_info_callback;
@@ -25,10 +25,19 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }
 
-static const std::vector<String> supported_functions{"any", "anyLast", "min",
-    "max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
-    "sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray"};
+void DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(const AggregateFunctionPtr & function)
+{
+    static const std::vector<String> supported_functions{"any", "anyLast", "min",
+        "max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
+        "sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray"};
+
+    // check function
+    if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))
+    {
+        throw Exception("Unsupported aggregate function " + function->getName() + ", supported functions are " + boost::algorithm::join(supported_functions, ","),
+            ErrorCodes::BAD_ARGUMENTS);
+    }
+}
 
 String DataTypeCustomSimpleAggregateFunction::getName() const
 {
@@ -114,12 +123,7 @@ static std::pair<DataTypePtr, DataTypeCustomDescPtr> create(const ASTPtr & argum
     AggregateFunctionProperties properties;
     function = AggregateFunctionFactory::instance().get(function_name, argument_types, params_row, properties);
 
-    // check function
-    if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))
-    {
-        throw Exception("Unsupported aggregate function " + function->getName() + ", supported functions are " + boost::algorithm::join(supported_functions, ","),
-            ErrorCodes::BAD_ARGUMENTS);
-    }
+    DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(function);
 
     DataTypePtr storage_type = DataTypeFactory::instance().get(argument_types[0]->getName());
 
@@ -37,6 +37,7 @@ public:
 
     const AggregateFunctionPtr getFunction() const { return function; }
     String getName() const override;
+    static void checkSupportedFunctions(const AggregateFunctionPtr & function);
 };
 
 }
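The two hunks above move the supported-function check out of create() into the reusable checkSupportedFunctions() helper. A standalone sketch of that refactor shape, with simplified names and a std exception instead of the ClickHouse one:

#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

static void checkSupported(const std::string & name)
{
    static const std::vector<std::string> supported{"any", "anyLast", "min", "max", "sum"};
    if (std::find(supported.begin(), supported.end(), name) == supported.end())
        throw std::invalid_argument("Unsupported aggregate function " + name);
}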
@@ -180,6 +180,7 @@ DataTypeFactory::DataTypeFactory()
     registerDataTypeDomainIPv4AndIPv6(*this);
     registerDataTypeDomainSimpleAggregateFunction(*this);
     registerDataTypeDomainGeo(*this);
+    registerDataTypeMap(*this);
 }
 
 DataTypeFactory & DataTypeFactory::instance()
@@ -73,6 +73,7 @@ void registerDataTypeFixedString(DataTypeFactory & factory);
 void registerDataTypeEnum(DataTypeFactory & factory);
 void registerDataTypeArray(DataTypeFactory & factory);
 void registerDataTypeTuple(DataTypeFactory & factory);
+void registerDataTypeMap(DataTypeFactory & factory);
 void registerDataTypeNullable(DataTypeFactory & factory);
 void registerDataTypeNothing(DataTypeFactory & factory);
 void registerDataTypeUUID(DataTypeFactory & factory);
src/DataTypes/DataTypeMap.cpp (new file, 375 lines)
@@ -0,0 +1,375 @@
+#include <Common/StringUtils/StringUtils.h>
+#include <Columns/ColumnMap.h>
+#include <Columns/ColumnArray.h>
+#include <Core/Field.h>
+#include <Formats/FormatSettings.h>
+#include <DataTypes/DataTypeMap.h>
+#include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/DataTypeFactory.h>
+#include <Parsers/IAST.h>
+#include <Parsers/ASTNameTypePair.h>
+#include <Common/typeid_cast.h>
+#include <Common/assert_cast.h>
+#include <Common/quoteString.h>
+#include <IO/WriteHelpers.h>
+#include <IO/ReadHelpers.h>
+#include <IO/WriteBufferFromString.h>
+#include <IO/ReadBufferFromString.h>
+#include <IO/Operators.h>
+
+#include <ext/map.h>
+#include <ext/enumerate.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int CANNOT_READ_MAP_FROM_TEXT;
+}
+
+
+DataTypeMap::DataTypeMap(const DataTypes & elems_)
+{
+    assert(elems_.size() == 2);
+    key_type = elems_[0];
+    value_type = elems_[1];
+
+    nested = std::make_shared<DataTypeArray>(
+        std::make_shared<DataTypeTuple>(DataTypes{key_type, value_type}, Names{"keys", "values"}));
+}
+
+DataTypeMap::DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & value_type_)
+    : key_type(key_type_), value_type(value_type_)
+    , nested(std::make_shared<DataTypeArray>(
+        std::make_shared<DataTypeTuple>(DataTypes{key_type_, value_type_}, Names{"keys", "values"}))) {}
+
+std::string DataTypeMap::doGetName() const
+{
+    WriteBufferFromOwnString s;
+    s << "Map(" << key_type->getName() << "," << value_type->getName() << ")";
+
+    return s.str();
+}
+
+static const IColumn & extractNestedColumn(const IColumn & column)
+{
+    return assert_cast<const ColumnMap &>(column).getNestedColumn();
+}
+
+static IColumn & extractNestedColumn(IColumn & column)
+{
+    return assert_cast<ColumnMap &>(column).getNestedColumn();
+}
+
+void DataTypeMap::serializeBinary(const Field & field, WriteBuffer & ostr) const
+{
+    const auto & map = get<const Map &>(field);
+    writeVarUInt(map.size(), ostr);
+    for (const auto & elem : map)
+    {
+        const auto & tuple = elem.safeGet<const Tuple>();
+        assert(tuple.size() == 2);
+        key_type->serializeBinary(tuple[0], ostr);
+        value_type->serializeBinary(tuple[1], ostr);
+    }
+}
+
+void DataTypeMap::deserializeBinary(Field & field, ReadBuffer & istr) const
+{
+    size_t size;
+    readVarUInt(size, istr);
+    field = Map(size);
+    for (auto & elem : field.get<Map &>())
+    {
+        Tuple tuple(2);
+        key_type->deserializeBinary(tuple[0], istr);
+        value_type->deserializeBinary(tuple[1], istr);
+        elem = std::move(tuple);
+    }
+}
+
+void DataTypeMap::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
+{
+    nested->serializeBinary(extractNestedColumn(column), row_num, ostr);
+}
+
+void DataTypeMap::deserializeBinary(IColumn & column, ReadBuffer & istr) const
+{
+    nested->deserializeBinary(extractNestedColumn(column), istr);
+}
+
+
+template <typename Writer>
+void DataTypeMap::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, Writer && writer) const
+{
+    const auto & column_map = assert_cast<const ColumnMap &>(column);
+
+    const auto & nested_array = column_map.getNestedColumn();
+    const auto & nested_tuple = column_map.getNestedData();
+    const auto & offsets = nested_array.getOffsets();
+
+    size_t offset = offsets[row_num - 1];
+    size_t next_offset = offsets[row_num];
+
+    writeChar('{', ostr);
+    for (size_t i = offset; i < next_offset; ++i)
+    {
+        if (i != offset)
+            writeChar(',', ostr);
+        writer(key_type, nested_tuple.getColumn(0), i);
+        writeChar(':', ostr);
+        writer(value_type, nested_tuple.getColumn(1), i);
+    }
+    writeChar('}', ostr);
+}
+
+template <typename Reader>
+void DataTypeMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr, bool need_safe_get_int_key, Reader && reader) const
+{
+    auto & column_map = assert_cast<ColumnMap &>(column);
+
+    auto & nested_array = column_map.getNestedColumn();
+    auto & nested_tuple = column_map.getNestedData();
+    auto & offsets = nested_array.getOffsets();
+
+    auto & key_column = nested_tuple.getColumn(0);
+    auto & value_column = nested_tuple.getColumn(1);
+
+    size_t size = 0;
+    assertChar('{', istr);
+
+    try
+    {
+        bool first = true;
+        while (!istr.eof() && *istr.position() != '}')
+        {
+            if (!first)
+            {
+                if (*istr.position() == ',')
+                    ++istr.position();
+                else
+                    throw Exception("Cannot read Map from text", ErrorCodes::CANNOT_READ_MAP_FROM_TEXT);
+            }
+
+            first = false;
+
+            skipWhitespaceIfAny(istr);
+
+            if (*istr.position() == '}')
+                break;
+
+            if (need_safe_get_int_key)
+            {
+                ReadBuffer::Position tmp = istr.position();
+                while (*tmp != ':' && *tmp != '}')
+                    ++tmp;
+                *tmp = ' ';
+                reader(key_type, key_column);
+            }
+            else
+            {
+                reader(key_type, key_column);
+                skipWhitespaceIfAny(istr);
+                assertChar(':', istr);
+            }
+
+            ++size;
+            skipWhitespaceIfAny(istr);
+            reader(value_type, value_column);
+
+            skipWhitespaceIfAny(istr);
+        }
+
+        offsets.push_back(offsets.back() + size);
+        assertChar('}', istr);
+    }
+    catch (...)
+    {
+        throw;
+    }
+}
+
+void DataTypeMap::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    serializeTextImpl(column, row_num, ostr,
+        [&](const DataTypePtr & subcolumn_type, const IColumn & subcolumn, size_t pos)
+        {
+            subcolumn_type->serializeAsTextQuoted(subcolumn, pos, ostr, settings);
+        });
+}
+
+void DataTypeMap::deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    // need_safe_get_int_key is set for integer keys to avoid readIntTextUnsafe
+    bool need_safe_get_int_key = isInteger(key_type);
+
+    deserializeTextImpl(column, istr, need_safe_get_int_key,
+        [&](const DataTypePtr & subcolumn_type, IColumn & subcolumn)
+        {
+            subcolumn_type->deserializeAsTextQuoted(subcolumn, istr, settings);
+        });
+}
+
+
+void DataTypeMap::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    serializeTextImpl(column, row_num, ostr,
+        [&](const DataTypePtr & subcolumn_type, const IColumn & subcolumn, size_t pos)
+        {
+            subcolumn_type->serializeAsTextJSON(subcolumn, pos, ostr, settings);
+        });
+}
+
+void DataTypeMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    // need_safe_get_int_key is set for integer keys to avoid readIntTextUnsafe
+    bool need_safe_get_int_key = isInteger(key_type);
+
+    deserializeTextImpl(column, istr, need_safe_get_int_key,
+        [&](const DataTypePtr & subcolumn_type, IColumn & subcolumn)
+        {
+            subcolumn_type->deserializeAsTextJSON(subcolumn, istr, settings);
+        });
+}
+
+void DataTypeMap::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    const auto & column_map = assert_cast<const ColumnMap &>(column);
+    const auto & offsets = column_map.getNestedColumn().getOffsets();
+
+    size_t offset = offsets[row_num - 1];
+    size_t next_offset = offsets[row_num];
+
+    const auto & nested_data = column_map.getNestedData();
+
+    writeCString("<map>", ostr);
+    for (size_t i = offset; i < next_offset; ++i)
+    {
+        writeCString("<elem>", ostr);
+        writeCString("<key>", ostr);
+        key_type->serializeAsTextXML(nested_data.getColumn(0), i, ostr, settings);
+        writeCString("</key>", ostr);
+
+        writeCString("<value>", ostr);
+        value_type->serializeAsTextXML(nested_data.getColumn(1), i, ostr, settings);
+        writeCString("</value>", ostr);
+        writeCString("</elem>", ostr);
+    }
+    writeCString("</map>", ostr);
+}
+
+void DataTypeMap::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    WriteBufferFromOwnString wb;
+    serializeText(column, row_num, wb, settings);
+    writeCSV(wb.str(), ostr);
+}
+
+void DataTypeMap::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    String s;
+    readCSV(s, istr, settings.csv);
+    ReadBufferFromString rb(s);
+    deserializeText(column, rb, settings);
+}
+
+
+void DataTypeMap::enumerateStreams(const StreamCallback & callback, SubstreamPath & path) const
+{
+    nested->enumerateStreams(callback, path);
+}
+
+void DataTypeMap::serializeBinaryBulkStatePrefix(
+    SerializeBinaryBulkSettings & settings,
+    SerializeBinaryBulkStatePtr & state) const
+{
+    nested->serializeBinaryBulkStatePrefix(settings, state);
+}
+
+void DataTypeMap::serializeBinaryBulkStateSuffix(
+    SerializeBinaryBulkSettings & settings,
+    SerializeBinaryBulkStatePtr & state) const
+{
+    nested->serializeBinaryBulkStateSuffix(settings, state);
+}
+
+void DataTypeMap::deserializeBinaryBulkStatePrefix(
+    DeserializeBinaryBulkSettings & settings,
+    DeserializeBinaryBulkStatePtr & state) const
+{
+    nested->deserializeBinaryBulkStatePrefix(settings, state);
+}
+
+
+void DataTypeMap::serializeBinaryBulkWithMultipleStreams(
+    const IColumn & column,
+    size_t offset,
+    size_t limit,
+    SerializeBinaryBulkSettings & settings,
+    SerializeBinaryBulkStatePtr & state) const
+{
+    nested->serializeBinaryBulkWithMultipleStreams(extractNestedColumn(column), offset, limit, settings, state);
+}
+
+void DataTypeMap::deserializeBinaryBulkWithMultipleStreams(
+    IColumn & column,
+    size_t limit,
+    DeserializeBinaryBulkSettings & settings,
+    DeserializeBinaryBulkStatePtr & state) const
+{
+    nested->deserializeBinaryBulkWithMultipleStreams(extractNestedColumn(column), limit, settings, state);
+}
+
+void DataTypeMap::serializeProtobuf(const IColumn & column, size_t row_num, ProtobufWriter & protobuf, size_t & value_index) const
+{
+    nested->serializeProtobuf(extractNestedColumn(column), row_num, protobuf, value_index);
+}
+
+void DataTypeMap::deserializeProtobuf(IColumn & column, ProtobufReader & protobuf, bool allow_add_row, bool & row_added) const
+{
+    nested->deserializeProtobuf(extractNestedColumn(column), protobuf, allow_add_row, row_added);
+}
+
+MutableColumnPtr DataTypeMap::createColumn() const
+{
+    return ColumnMap::create(nested->createColumn());
+}
+
+Field DataTypeMap::getDefault() const
+{
+    return Map();
+}
+
+bool DataTypeMap::equals(const IDataType & rhs) const
+{
+    if (typeid(rhs) != typeid(*this))
+        return false;
+
+    const DataTypeMap & rhs_map = static_cast<const DataTypeMap &>(rhs);
+    return nested->equals(*rhs_map.nested);
+}
+
+static DataTypePtr create(const ASTPtr & arguments)
+{
+    if (!arguments || arguments->children.size() != 2)
+        throw Exception("Map data type family must have two arguments: key and value types", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+
+    DataTypes nested_types;
+    nested_types.reserve(arguments->children.size());
+
+    for (const ASTPtr & child : arguments->children)
+        nested_types.emplace_back(DataTypeFactory::instance().get(child));
+
+    return std::make_shared<DataTypeMap>(nested_types);
+}
+
+
+void registerDataTypeMap(DataTypeFactory & factory)
+{
+    factory.registerDataType("Map", create);
+}
+}
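For orientation, serializeTextImpl() above emits one map row as '{key:value,...}', with keys and values rendered by the per-type writer. A self-contained sketch of the same output shape using plain std types (the quoting below is simplified relative to serializeAsTextQuoted):

#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<std::string, int> row{{"a", 1}, {"b", 2}};
    std::cout << '{';
    bool first = true;
    for (const auto & [k, v] : row)
    {
        if (!first)
            std::cout << ',';
        first = false;
        std::cout << '\'' << k << "':" << v;  // prints 'key':value
    }
    std::cout << "}\n";  // prints {'a':1,'b':2}
}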
src/DataTypes/DataTypeMap.h (new file, 103 lines)
@@ -0,0 +1,103 @@
+#pragma once
+
+#include <DataTypes/DataTypeWithSimpleSerialization.h>
+
+
+namespace DB
+{
+
+/** Map data type.
+  * Map is implemented as two arrays of keys and values.
+  * Serialization of type 'Map(K, V)' is similar to serialization
+  * of 'Array(Tuple(keys K, values V))', or in other words, of 'Nested(keys K, values V)'.
+  */
+class DataTypeMap final : public DataTypeWithSimpleSerialization
+{
+private:
+    DataTypePtr key_type;
+    DataTypePtr value_type;
+
+    /// 'nested' is an Array(Tuple(key_type, value_type))
+    DataTypePtr nested;
+
+public:
+    static constexpr bool is_parametric = true;
+
+    DataTypeMap(const DataTypes & elems);
+    DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & value_type_);
+
+    TypeIndex getTypeId() const override { return TypeIndex::Map; }
+    std::string doGetName() const override;
+    const char * getFamilyName() const override { return "Map"; }
+
+    bool canBeInsideNullable() const override { return false; }
+
+    void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
+    void deserializeBinary(Field & field, ReadBuffer & istr) const override;
+    void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const override;
+    void deserializeBinary(IColumn & column, ReadBuffer & istr) const override;
+    void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
+    void deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
+    void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
+    void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
+    void serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
+
+    void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
+    void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
+
+
+    /** Each sub-column in a map is serialized in a separate stream.
+      */
+    void enumerateStreams(const StreamCallback & callback, SubstreamPath & path) const override;
+
+    void serializeBinaryBulkStatePrefix(
+        SerializeBinaryBulkSettings & settings,
+        SerializeBinaryBulkStatePtr & state) const override;
+
+    void serializeBinaryBulkStateSuffix(
+        SerializeBinaryBulkSettings & settings,
+        SerializeBinaryBulkStatePtr & state) const override;
+
+    void deserializeBinaryBulkStatePrefix(
+        DeserializeBinaryBulkSettings & settings,
+        DeserializeBinaryBulkStatePtr & state) const override;
+
+    void serializeBinaryBulkWithMultipleStreams(
+        const IColumn & column,
+        size_t offset,
+        size_t limit,
+        SerializeBinaryBulkSettings & settings,
+        SerializeBinaryBulkStatePtr & state) const override;
+
+    void deserializeBinaryBulkWithMultipleStreams(
+        IColumn & column,
+        size_t limit,
+        DeserializeBinaryBulkSettings & settings,
+        DeserializeBinaryBulkStatePtr & state) const override;
+
+    void serializeProtobuf(const IColumn & column, size_t row_num, ProtobufWriter & protobuf, size_t & value_index) const override;
+    void deserializeProtobuf(IColumn & column, ProtobufReader & protobuf, bool allow_add_row, bool & row_added) const override;
+
+    MutableColumnPtr createColumn() const override;
+
+    Field getDefault() const override;
+
+    bool equals(const IDataType & rhs) const override;
+    bool isComparable() const override { return key_type->isComparable() && value_type->isComparable(); }
+    bool isParametric() const override { return true; }
+    bool haveSubtypes() const override { return true; }
+
+    const DataTypePtr & getKeyType() const { return key_type; }
+    const DataTypePtr & getValueType() const { return value_type; }
+    DataTypes getKeyValueTypes() const { return {key_type, value_type}; }
+
+private:
+    template <typename Writer>
+    void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, Writer && writer) const;
+
+    template <typename Reader>
+    void deserializeTextImpl(IColumn & column, ReadBuffer & istr, bool need_safe_get_int_key, Reader && reader) const;
+};
+
+}
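The header comment above describes the storage model: one flat array of keys, one flat array of values, and per-row offsets that delimit each map, exactly as in Array(Tuple(keys, values)). A hedged sketch of that layout with plain std containers (row 0 is handled explicitly here, whereas ClickHouse's offsets array permits offsets[row_num - 1] at row 0):

#include <cstddef>
#include <string>
#include <vector>

struct MapColumnSketch
{
    std::vector<std::string> keys;   // flattened keys of all rows
    std::vector<int> values;         // flattened values of all rows
    std::vector<size_t> offsets;     // offsets[i] = one past the end of row i

    size_t rowBegin(size_t row) const { return row == 0 ? 0 : offsets[row - 1]; }
    size_t rowEnd(size_t row) const { return offsets[row]; }
};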
@@ -96,22 +96,29 @@ inline UInt32 getDecimalScale(const DataTypeDecimal<T> & data_type)
     return data_type.getScale();
 }
 
-template <typename FromDataType, typename ToDataType>
-inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>, typename ToDataType::FieldType>
-convertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to)
+template <typename FromDataType, typename ToDataType, typename ReturnType = void>
+inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>, ReturnType>
+convertDecimalsImpl(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType & result)
 {
     using FromFieldType = typename FromDataType::FieldType;
     using ToFieldType = typename ToDataType::FieldType;
     using MaxFieldType = std::conditional_t<(sizeof(FromFieldType) > sizeof(ToFieldType)), FromFieldType, ToFieldType>;
     using MaxNativeType = typename MaxFieldType::NativeType;
 
+    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
+
     MaxNativeType converted_value;
     if (scale_to > scale_from)
     {
         converted_value = DecimalUtils::scaleMultiplier<MaxNativeType>(scale_to - scale_from);
         if (common::mulOverflow(static_cast<MaxNativeType>(value.value), converted_value, converted_value))
-            throw Exception(std::string(ToDataType::family_name) + " convert overflow",
-                ErrorCodes::DECIMAL_OVERFLOW);
+        {
+            if constexpr (throw_exception)
+                throw Exception(std::string(ToDataType::family_name) + " convert overflow",
+                    ErrorCodes::DECIMAL_OVERFLOW);
+            else
+                return ReturnType(false);
+        }
     }
     else
         converted_value = value.value / DecimalUtils::scaleMultiplier<MaxNativeType>(scale_from - scale_to);
@@ -120,35 +127,87 @@ convertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_fro
     {
         if (converted_value < std::numeric_limits<typename ToFieldType::NativeType>::min() ||
             converted_value > std::numeric_limits<typename ToFieldType::NativeType>::max())
-            throw Exception(std::string(ToDataType::family_name) + " convert overflow",
-                ErrorCodes::DECIMAL_OVERFLOW);
+        {
+            if constexpr (throw_exception)
+                throw Exception(std::string(ToDataType::family_name) + " convert overflow",
+                    ErrorCodes::DECIMAL_OVERFLOW);
+            else
+                return ReturnType(false);
+        }
     }
 
-    return static_cast<typename ToFieldType::NativeType>(converted_value);
+    result = static_cast<typename ToFieldType::NativeType>(converted_value);
+
+    return ReturnType(true);
+}
+
+template <typename FromDataType, typename ToDataType>
+inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>, typename ToDataType::FieldType>
+convertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to)
+{
+    using ToFieldType = typename ToDataType::FieldType;
+    ToFieldType result;
+
+    convertDecimalsImpl<FromDataType, ToDataType, void>(value, scale_from, scale_to, result);
+
+    return result;
+}
+
+template <typename FromDataType, typename ToDataType>
+inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>, bool>
+tryConvertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType & result)
+{
+    return convertDecimalsImpl<FromDataType, ToDataType, bool>(value, scale_from, scale_to, result);
+}
+
+template <typename FromDataType, typename ToDataType, typename ReturnType>
+inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsNumber<typename ToDataType::FieldType>, ReturnType>
+convertFromDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result)
+{
+    using FromFieldType = typename FromDataType::FieldType;
+    using ToFieldType = typename ToDataType::FieldType;
+
+    return DecimalUtils::convertToImpl<ToFieldType, FromFieldType, ReturnType>(value, scale, result);
 }
 
 template <typename FromDataType, typename ToDataType>
 inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsNumber<typename ToDataType::FieldType>, typename ToDataType::FieldType>
 convertFromDecimal(const typename FromDataType::FieldType & value, UInt32 scale)
 {
-    using ToFieldType = typename ToDataType::FieldType;
+    typename ToDataType::FieldType result;
 
-    return DecimalUtils::convertTo<ToFieldType>(value, scale);
+    convertFromDecimalImpl<FromDataType, ToDataType, void>(value, scale, result);
+
+    return result;
 }
 
 template <typename FromDataType, typename ToDataType>
-inline std::enable_if_t<IsNumber<typename FromDataType::FieldType> && IsDataTypeDecimal<ToDataType>, typename ToDataType::FieldType>
-convertToDecimal(const typename FromDataType::FieldType & value, UInt32 scale)
+inline std::enable_if_t<IsDataTypeDecimal<FromDataType> && IsNumber<typename ToDataType::FieldType>, bool>
+tryConvertFromDecimal(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result)
+{
+    return convertFromDecimalImpl<FromDataType, ToDataType, bool>(value, scale, result);
+}
+
+template <typename FromDataType, typename ToDataType, typename ReturnType>
+inline std::enable_if_t<IsNumber<typename FromDataType::FieldType> && IsDataTypeDecimal<ToDataType>, ReturnType>
+convertToDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result)
 {
     using FromFieldType = typename FromDataType::FieldType;
     using ToFieldType = typename ToDataType::FieldType;
     using ToNativeType = typename ToFieldType::NativeType;
 
+    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
+
     if constexpr (std::is_floating_point_v<FromFieldType>)
     {
         if (!std::isfinite(value))
-            throw Exception(std::string(ToDataType::family_name) + " convert overflow. Cannot convert infinity or NaN to decimal",
-                ErrorCodes::DECIMAL_OVERFLOW);
+        {
+            if constexpr (throw_exception)
+                throw Exception(std::string(ToDataType::family_name) + " convert overflow. Cannot convert infinity or NaN to decimal",
+                    ErrorCodes::DECIMAL_OVERFLOW);
+            else
+                return false;
+        }
 
         auto out = value * static_cast<FromFieldType>(DecimalUtils::scaleMultiplier<ToNativeType>(scale));
         if constexpr (std::is_same_v<ToNativeType, Int128>)
@@ -157,29 +216,60 @@ convertToDecimal(const typename FromDataType::FieldType & value, UInt32 scale)
             static constexpr Int128 max_int128 = maxInt128();
 
             if (out <= static_cast<ToNativeType>(min_int128) || out >= static_cast<ToNativeType>(max_int128))
-                throw Exception(std::string(ToDataType::family_name) + " convert overflow. Float is out of Decimal range",
-                    ErrorCodes::DECIMAL_OVERFLOW);
+            {
+                if constexpr (throw_exception)
+                    throw Exception(std::string(ToDataType::family_name) + " convert overflow. Float is out of Decimal range",
+                        ErrorCodes::DECIMAL_OVERFLOW);
+                else
+                    return ReturnType(false);
+            }
         }
         else
         {
             if (out <= static_cast<FromFieldType>(std::numeric_limits<ToNativeType>::min()) ||
                 out >= static_cast<FromFieldType>(std::numeric_limits<ToNativeType>::max()))
-                throw Exception(std::string(ToDataType::family_name) + " convert overflow. Float is out of Decimal range",
-                    ErrorCodes::DECIMAL_OVERFLOW);
+            {
+                if constexpr (throw_exception)
+                    throw Exception(std::string(ToDataType::family_name) + " convert overflow. Float is out of Decimal range",
+                        ErrorCodes::DECIMAL_OVERFLOW);
+                else
+                    return ReturnType(false);
+            }
         }
-        return static_cast<ToNativeType>(out);
+
+        result = static_cast<ToNativeType>(out);
+
+        return ReturnType(true);
     }
     else
     {
         if constexpr (is_big_int_v<FromFieldType>)
-            return convertDecimals<DataTypeDecimal<Decimal256>, ToDataType>(static_cast<Int256>(value), 0, scale);
+            return ReturnType(convertDecimalsImpl<DataTypeDecimal<Decimal256>, ToDataType, ReturnType>(static_cast<Int256>(value), 0, scale, result));
         else if constexpr (std::is_same_v<FromFieldType, UInt64>)
-            return convertDecimals<DataTypeDecimal<Decimal128>, ToDataType>(value, 0, scale);
+            return ReturnType(convertDecimalsImpl<DataTypeDecimal<Decimal128>, ToDataType, ReturnType>(value, 0, scale, result));
         else
-            return convertDecimals<DataTypeDecimal<Decimal64>, ToDataType>(value, 0, scale);
+            return ReturnType(convertDecimalsImpl<DataTypeDecimal<Decimal64>, ToDataType, ReturnType>(value, 0, scale, result));
     }
 }
 
+template <typename FromDataType, typename ToDataType>
+inline std::enable_if_t<IsNumber<typename FromDataType::FieldType> && IsDataTypeDecimal<ToDataType>, typename ToDataType::FieldType>
+convertToDecimal(const typename FromDataType::FieldType & value, UInt32 scale)
+{
+    typename ToDataType::FieldType result;
+
+    convertToDecimalImpl<FromDataType, ToDataType, void>(value, scale, result);
+
+    return result;
+}
+
+template <typename FromDataType, typename ToDataType>
+inline std::enable_if_t<IsNumber<typename FromDataType::FieldType> && IsDataTypeDecimal<ToDataType>, bool>
+tryConvertToDecimal(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result)
+{
+    return convertToDecimalImpl<FromDataType, ToDataType, bool>(value, scale, result);
+}
+
 template <typename T>
 inline DataTypePtr createDecimalMaxPrecision(UInt64 scale)
 {
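The decimal-conversion hunks above all follow one compile-time pattern: a single Impl function either throws (ReturnType = void) or reports failure through bool, and thin convert/tryConvert wrappers pick the mode. A minimal standalone sketch of the same pattern, with division standing in for the overflow checks:

#include <stdexcept>
#include <type_traits>

template <typename ReturnType = void>
ReturnType divideImpl(int a, int b, int & result)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
    if (b == 0)
    {
        if constexpr (throw_exception)
            throw std::overflow_error("division by zero");
        else
            return ReturnType(false);
    }
    result = a / b;
    return ReturnType(true);  // a no-op expression when ReturnType is void
}

// Usage: divideImpl(6, 2, r) throws on failure; divideImpl<bool>(6, 0, r) returns false instead.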
@@ -1,6 +1,7 @@
 #include <Common/FieldVisitors.h>
 #include <DataTypes/FieldToDataType.h>
 #include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/DataTypeMap.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypesDecimal.h>
 #include <DataTypes/DataTypeString.h>
@@ -118,6 +119,24 @@ DataTypePtr FieldToDataType::operator() (const Tuple & tuple) const
     return std::make_shared<DataTypeTuple>(element_types);
 }
 
+DataTypePtr FieldToDataType::operator() (const Map & map) const
+{
+    DataTypes key_types;
+    DataTypes value_types;
+    key_types.reserve(map.size());
+    value_types.reserve(map.size());
+
+    for (const auto & elem : map)
+    {
+        const auto & tuple = elem.safeGet<const Tuple &>();
+        assert(tuple.size() == 2);
+        key_types.push_back(applyVisitor(FieldToDataType(), tuple[0]));
+        value_types.push_back(applyVisitor(FieldToDataType(), tuple[1]));
+    }
+
+    return std::make_shared<DataTypeMap>(getLeastSupertype(key_types), getLeastSupertype(value_types));
+}
+
 DataTypePtr FieldToDataType::operator() (const AggregateFunctionStateData & x) const
 {
     const auto & name = static_cast<const AggregateFunctionStateData &>(x).name;
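FieldToDataType::operator()(const Map &) above infers one type per key and per value, then collapses each list with getLeastSupertype. A toy sketch of that reduce step over a stand-in type lattice (a rank enum replaces real DataTypes):

#include <algorithm>
#include <vector>

enum class Rank { UInt8, UInt32, UInt64 };  // toy stand-in for a type lattice

Rank leastSupertype(const std::vector<Rank> & types)
{
    // In this linear toy lattice the least common supertype is simply the max rank.
    return *std::max_element(types.begin(), types.end());
}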
@@ -26,6 +26,7 @@ public:
     DataTypePtr operator() (const String & x) const;
     DataTypePtr operator() (const Array & x) const;
     DataTypePtr operator() (const Tuple & tuple) const;
+    DataTypePtr operator() (const Map & map) const;
     DataTypePtr operator() (const DecimalField<Decimal32> & x) const;
     DataTypePtr operator() (const DecimalField<Decimal64> & x) const;
     DataTypePtr operator() (const DecimalField<Decimal128> & x) const;
@@ -91,6 +91,8 @@ public:
 
         TupleElement,
 
+        MapElement,
+
         DictionaryKeys,
         DictionaryIndexes,
     };
@@ -449,6 +451,7 @@ public:
     static bool isSpecialCompressionAllowed(const SubstreamPath & path);
 private:
     friend class DataTypeFactory;
+    friend class AggregateFunctionSimpleState;
     /// Customize this DataType
     void setCustomization(DataTypeCustomDescPtr custom_desc_) const;
 
@@ -517,6 +520,7 @@ struct WhichDataType
     constexpr bool isUUID() const { return idx == TypeIndex::UUID; }
     constexpr bool isArray() const { return idx == TypeIndex::Array; }
     constexpr bool isTuple() const { return idx == TypeIndex::Tuple; }
+    constexpr bool isMap() const { return idx == TypeIndex::Map; }
     constexpr bool isSet() const { return idx == TypeIndex::Set; }
     constexpr bool isInterval() const { return idx == TypeIndex::Interval; }
 
(Some files were not shown because too many files have changed in this diff.)