Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 08:32:02 +00:00)

Commit 55984e849c: Merge branch 'master' into hierarchy-dictionaries-updated
@@ -248,19 +248,27 @@ if (ARCH_NATIVE)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
 endif ()

-if (COMPILER_GCC OR COMPILER_CLANG)
-    # to make numeric_limits<__int128> work with GCC
-    set (_CXX_STANDARD "gnu++2a")
-else()
-    set (_CXX_STANDARD "c++2a")
-endif()
-
-# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now
-# set (CMAKE_CXX_STANDARD 20)
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
+if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
+    # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
+    # We will add the C++ standard controlling flag to CMAKE_CXX_FLAGS manually for now.
+
+    if (COMPILER_GCC OR COMPILER_CLANG)
+        # to make numeric_limits<__int128> work with GCC
+        set (_CXX_STANDARD "gnu++2a")
+    else ()
+        set (_CXX_STANDARD "c++2a")
+    endif ()
+
+    set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
+    set (CMAKE_CXX_STANDARD_REQUIRED ON)
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
+else ()
+    set (CMAKE_CXX_STANDARD 20)
+    set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
+    set (CMAKE_CXX_STANDARD_REQUIRED ON)
+endif ()
+
+set (CMAKE_C_STANDARD 11)
+set (CMAKE_C_EXTENSIONS ON)
+set (CMAKE_C_STANDARD_REQUIRED ON)

 if (COMPILER_GCC OR COMPILER_CLANG)
     # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
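The `gnu++2a` vs `c++2a` distinction matters here because libstdc++ only specializes `numeric_limits` for the non-standard `__int128` type when GNU language extensions are enabled. A minimal illustration of the difference (not part of the commit; assumes GCC or Clang with libstdc++):

```cpp
#include <iostream>
#include <limits>

int main()
{
    // Compiled with -std=gnu++2a this compiles and prints 127 (value bits of
    // signed __int128); with strict -std=c++2a the specialization is absent,
    // so the static_assert fails.
    static_assert(std::numeric_limits<__int128>::is_specialized);
    std::cout << std::numeric_limits<__int128>::digits << '\n';
}
```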
@@ -47,6 +47,10 @@ endif()

 target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..)

+if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
+    target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
+endif()
+
 # Allow explicit fallback to readline
 if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
     message (STATUS "Attempt to fallback to readline explicitly")
@@ -853,15 +853,43 @@ public:
     {
         if (hours == 1)
             return toStartOfHour(t);
-        UInt64 seconds = hours * 3600;
-        t = roundDown(t, seconds);
-        if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
-            return t;
-        /// TODO check if it's correct.
-        return toStartOfHour(t);
+
+        /** We will round the hour number since the midnight.
+          * It may split the day into non-equal intervals.
+          * For example, if we will round to 11-hour interval,
+          * the day will be split to the intervals 00:00:00..10:59:59, 11:00:00..21:59:59, 22:00:00..23:59:59.
+          * In case of daylight saving time or other transitions,
+          * the intervals can be shortened or prolonged to the amount of transition.
+          */
+
+        UInt64 seconds = hours * 3600;
+
+        const LUTIndex index = findIndex(t);
+        const Values & values = lut[index];
+
+        time_t time = t - values.date;
+        if (time >= values.time_at_offset_change())
+        {
+            /// Align to new hour numbers before rounding.
+            time += values.amount_of_offset_change();
+            time = time / seconds * seconds;
+
+            /// Should subtract the shift back but only if rounded time is not before shift.
+            if (time >= values.time_at_offset_change())
+            {
+                time -= values.amount_of_offset_change();
+
+                /// With cutoff at the time of the shift. Otherwise we may end up with something like 23:00 previous day.
+                if (time < values.time_at_offset_change())
+                    time = values.time_at_offset_change();
+            }
+        }
+        else
+        {
+            time = time / seconds * seconds;
+        }
+
+        return values.date + time;
     }

     inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const
@@ -869,6 +897,14 @@ public:
         if (minutes == 1)
             return toStartOfMinute(t);
+
+        /** In contrast to "toStartOfHourInterval" function above,
+          * the minute intervals are not aligned to the midnight.
+          * You will get unexpected results if, for example, you round down to a 60 minute interval
+          * and there was a time shift to 30 minutes.
+          *
+          * But this is not specified in docs and can be changed in future.
+          */
+
         UInt64 seconds = 60 * minutes;
         return roundDown(t, seconds);
     }
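The key idea of the new hour-interval code is that rounding is anchored to local midnight, while the minute variant stays plain epoch rounding. A small self-contained sketch of that midnight-aligned behaviour for a fixed-offset timezone (my illustration, not the library code; it sidesteps the LUT and offset-change handling, and assumes a POSIX-ish environment with `timegm`/`gmtime_r`):

```cpp
#include <cstdint>
#include <ctime>
#include <iostream>

// Round `t` down to the start of an `hours`-long interval counted from local
// midnight (fixed UTC offset, no DST): an 11-hour interval splits the day into
// 00:00..10:59:59, 11:00..21:59:59 and a short 22:00..23:59:59 tail.
time_t toStartOfHourIntervalFixedOffset(time_t t, uint64_t hours, long offset_sec)
{
    const time_t seconds = static_cast<time_t>(hours) * 3600;
    const time_t local = t + offset_sec;
    const time_t day_start = local - ((local % 86400) + 86400) % 86400; // local midnight
    return day_start + (local - day_start) / seconds * seconds - offset_sec;
}

int main()
{
    for (int hour : {3, 12, 23})
    {
        std::tm tm{};
        tm.tm_year = 121; tm.tm_mon = 2; tm.tm_mday = 15; tm.tm_hour = hour; // 2021-03-15, UTC
        const time_t rounded = toStartOfHourIntervalFixedOffset(timegm(&tm), 11, 0);
        std::tm out{};
        gmtime_r(&rounded, &out);
        std::cout << hour << ":00 -> " << out.tm_hour << ":00\n"; // 3->0, 12->11, 23->22
    }
}
```

The minute version, by contrast, remains `roundDown(t, 60 * minutes)` on the raw epoch value, which is why a timezone with a 30-minute offset change can produce surprising 60-minute buckets, as the comment in the diff warns.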
@@ -1,45 +1,28 @@
-// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
-
 #include <common/setTerminalEcho.h>
 #include <common/errnoToString.h>
 #include <stdexcept>
 #include <cstring>
 #include <string>
-
-#ifdef WIN32
-#include <windows.h>
-#else
 #include <termios.h>
 #include <unistd.h>
 #include <errno.h>
-#endif


 void setTerminalEcho(bool enable)
 {
-#ifdef WIN32
-    auto handle = GetStdHandle(STD_INPUT_HANDLE);
-    DWORD mode;
-    if (!GetConsoleMode(handle, &mode))
-        throw std::runtime_error(std::string("setTerminalEcho failed get: ") + std::to_string(GetLastError()));
-
-    if (!enable)
-        mode &= ~ENABLE_ECHO_INPUT;
-    else
-        mode |= ENABLE_ECHO_INPUT;
-
-    if (!SetConsoleMode(handle, mode))
-        throw std::runtime_error(std::string("setTerminalEcho failed set: ") + std::to_string(GetLastError()));
-#else
-    struct termios tty;
-    if (tcgetattr(STDIN_FILENO, &tty))
+    /// Obtain terminal attributes,
+    /// toggle the ECHO flag
+    /// and set them back.
+
+    struct termios tty{};
+
+    if (0 != tcgetattr(STDIN_FILENO, &tty))
         throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
-    if (!enable)
-        tty.c_lflag &= ~ECHO;
-    else
+
+    if (enable)
         tty.c_lflag |= ECHO;
+    else
+        tty.c_lflag &= ~ECHO;

-    auto ret = tcsetattr(STDIN_FILENO, TCSANOW, &tty);
-    if (ret)
+    if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
         throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
-#endif
 }
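For context, a minimal hypothetical caller of this helper (not from the commit), prompting for a password with echo disabled and restoring it afterwards:

```cpp
#include <common/setTerminalEcho.h>
#include <iostream>
#include <string>

int main()
{
    std::cout << "Password: " << std::flush;
    setTerminalEcho(false);               // hide the characters being typed
    std::string password;
    std::getline(std::cin, password);
    setTerminalEcho(true);                // restore normal terminal behaviour
    std::cout << "\nRead " << password.size() << " characters.\n";
}
```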
@@ -5,6 +5,11 @@ add_library (daemon
 )

 target_include_directories (daemon PUBLIC ..)

+if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
+    target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
+endif()
+
 target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})

 if (USE_SENTRY)
contrib/CMakeLists.txt (vendored, 25 lines changed)

@@ -215,15 +215,17 @@ if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
     set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
     set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
-    # Yes it is set globally, but this is not enough, since llvm will add -std=c++11 after default
-    # And c++2a cannot be used, due to ambiguous operator !=
-    if (COMPILER_GCC OR COMPILER_CLANG)
-        set (_CXX_STANDARD "gnu++17")
-    else()
-        set (_CXX_STANDARD "c++17")
-    endif()
-    set (LLVM_CXX_STD ${_CXX_STANDARD} CACHE STRING "" FORCE)
+
+    # Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
+    # LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
+    set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
+    set (CMAKE_CXX_STANDARD 17)
+
     add_subdirectory (llvm/llvm)
+
+    set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
+    unset (CMAKE_CXX_STANDARD_bak)
+
     target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
 endif ()

@@ -280,7 +282,14 @@ if (USE_AMQPCPP)
     add_subdirectory (amqpcpp-cmake)
 endif()
 if (USE_CASSANDRA)
+    # Need to use C++17 since the compilation is not possible with C++20 currently.
+    set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
+    set (CMAKE_CXX_STANDARD 17)
+
     add_subdirectory (cassandra)
+
+    set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
+    unset (CMAKE_CXX_STANDARD_bak)
 endif()

 # Should go before:
contrib/NuRaft (vendored, submodule bump)

@@ -1 +1 @@
-Subproject commit 3d3683e77753cfe015a05fae95ddf418e19f59e1
+Subproject commit 70468326ad5d72e9497944838484c591dae054ea

contrib/grpc (vendored, submodule bump)

@@ -1 +1 @@
-Subproject commit 7436366ceb341ba5c00ea29f1645e02a2b70bf93
+Subproject commit 8d558f03fe370240081424fafa76cdc9301ea14b
(One file diff suppressed because it is too large.)
@@ -14,12 +14,8 @@ RUN apt-get update \
         lsb-release \
         wget \
         --yes --no-install-recommends --verbose-versions \
-    && cat /etc/resolv.conf \
-    && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
-    && nslookup -debug apt.llvm.org \
-    && ping -c1 apt.llvm.org \
-    && wget -nv --retry-connrefused --tries=10 -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
     && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
+    && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
     && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
     && apt-key add /tmp/llvm-snapshot.gpg.key \
     && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
@@ -36,10 +32,7 @@ RUN apt-get update \
         software-properties-common \
         --yes --no-install-recommends

-RUN cat /etc/resolv.conf \
-    && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
-    && nslookup -debug apt.llvm.org \
-    && apt-get update \
+RUN apt-get update \
     && apt-get install \
         bash \
         cmake \
@@ -4,6 +4,21 @@ ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
 ARG version=21.4.1.*
 ARG gosu_ver=1.10

+# set non-empty deb_location_url url to create a docker image
+# from debs created by CI build, for example:
+# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
+ARG deb_location_url=""
+
+# set non-empty single_binary_location_url to create docker image
+# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
+# for example (run on aarch64 server):
+# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.tech/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
+# note: clickhouse-odbc-bridge is not supported there.
+ARG single_binary_location_url=""
+
+# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
+ARG DEBIAN_FRONTEND=noninteractive
+
 # user/group precreated explicitly with fixed uid/gid on purpose.
 # It is especially important for rootless containers: in that case entrypoint
 # can't do chown and owners of mounted volumes should be configured externally.
@@ -19,20 +34,37 @@ RUN groupadd -r clickhouse --gid=101 \
         ca-certificates \
         dirmngr \
         gnupg \
+        locales \
+        wget \
+        tzdata \
     && mkdir -p /etc/apt/sources.list.d \
     && apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
     && echo $repository > /etc/apt/sources.list.d/clickhouse.list \
-    && apt-get update \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get install --allow-unauthenticated --yes --no-install-recommends \
-            clickhouse-common-static=$version \
-            clickhouse-client=$version \
-            clickhouse-server=$version \
-            locales \
-            wget \
-            tzdata \
+    && if [ -n "$deb_location_url" ]; then \
+        echo "installing from custom url with deb packages: $deb_location_url" \
+        rm -rf /tmp/clickhouse_debs \
+        && mkdir -p /tmp/clickhouse_debs \
+        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \
+        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \
+        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \
+        && dpkg -i /tmp/clickhouse_debs/*.deb ; \
+    elif [ -n "$single_binary_location_url" ]; then \
+        echo "installing from single binary url: $single_binary_location_url" \
+        && rm -rf /tmp/clickhouse_binary \
+        && mkdir -p /tmp/clickhouse_binary \
+        && wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \
+        && chmod +x /tmp/clickhouse_binary/clickhouse \
+        && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
+    else \
+        echo "installing from repository: $repository" \
+        && apt-get update \
+        && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
+        && apt-get install --allow-unauthenticated --yes --no-install-recommends \
+            clickhouse-common-static=$version \
+            clickhouse-client=$version \
+            clickhouse-server=$version ; \
+    fi \
+    && clickhouse-local -q 'SELECT * FROM system.build_options' \
     && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
@@ -38,17 +38,16 @@ if ! $gosu test -f "$CLICKHOUSE_CONFIG" -a -r "$CLICKHOUSE_CONFIG"; then
     exit 1
 fi

-# port is needed to check if clickhouse-server is ready for connections
-HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"
-
 # get CH directories locations
 DATA_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=path || true)"
 TMP_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=tmp_path || true)"
 USER_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=user_files_path || true)"
 LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.log || true)"
-LOG_DIR="$(dirname "$LOG_PATH" || true)"
+LOG_DIR=""
+if [ -n "$LOG_PATH" ]; then LOG_DIR="$(dirname "$LOG_PATH")"; fi
 ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.errorlog || true)"
-ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH" || true)"
+ERROR_LOG_DIR=""
+if [ -n "$ERROR_LOG_PATH" ]; then ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH")"; fi
 FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)"

 CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
@@ -106,6 +105,9 @@ EOT
 fi

 if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
+    # port is needed to check if clickhouse-server is ready for connections
+    HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"
+
     # Listen only on localhost until the initialization is done
     $gosu /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
     pid="$!"
@@ -4,9 +4,8 @@ FROM ubuntu:20.04
 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11

 RUN apt-get update \
-    && apt-get install apt-utils ca-certificates lsb-release wget gnupg apt-transport-https \
+    && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
         --yes --no-install-recommends --verbose-versions \
-    && echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
     && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
     && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
     && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
@@ -32,8 +31,7 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
     && chmod +x dpkg-deb \
     && cp dpkg-deb /usr/bin

-RUN echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
-    && apt-get update \
+RUN apt-get update \
     && apt-get install \
         clang-${LLVM_VERSION} \
         debhelper \
@@ -70,7 +70,7 @@ function start_server
         --path "$FASTTEST_DATA"
         --user_files_path "$FASTTEST_DATA/user_files"
         --top_level_domains_path "$FASTTEST_DATA/top_level_domains"
-        --test_keeper_server.log_storage_path "$FASTTEST_DATA/coordination"
+        --keeper_server.log_storage_path "$FASTTEST_DATA/coordination"
     )
     clickhouse-server "${opts[@]}" &>> "$FASTTEST_OUTPUT/server.log" &
     server_pid=$!
@@ -3,7 +3,7 @@
     <mysql_port remove="remove"/>
     <interserver_http_port remove="remove"/>
     <tcp_with_proxy_port remove="remove"/>
-    <test_keeper_server remove="remove"/>
+    <keeper_server remove="remove"/>
     <listen_host>::</listen_host>

     <logger>
@@ -2,7 +2,6 @@
 FROM ubuntu:20.04

 RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends
-
 RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
 RUN mkdir /sqlancer && \
     cd /sqlancer && \
@@ -13,6 +13,25 @@ dpkg -i package_folder/clickhouse-test_*.deb

 function start()
 {
+    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+        # NOTE We run "clickhouse server" instead of "clickhouse-server"
+        # to make "pidof clickhouse-server" return single pid of the main instance.
+        # We will run the main instance using "service clickhouse-server start"
+        sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
+            -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
+            --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
+            --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
+            --mysql_port 19004 \
+            --keeper_server.tcp_port 19181 --keeper_server.server_id 2
+
+        sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
+            -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
+            --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
+            --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
+            --mysql_port 29004 \
+            --keeper_server.tcp_port 29181 --keeper_server.server_id 3
+    fi
+
     counter=0
     until clickhouse-client --query "SELECT 1"
     do
@@ -35,9 +54,8 @@ start
 /s3downloader --dataset-names $DATASETS
 chmod 777 -R /var/lib/clickhouse
 clickhouse-client --query "SHOW DATABASES"
 clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
-clickhouse-client --query "CREATE DATABASE test"

 service clickhouse-server restart

 # Wait for server to start accepting connections
@@ -47,24 +65,50 @@ for _ in {1..120}; do
 done

 clickhouse-client --query "SHOW TABLES FROM datasets"
-clickhouse-client --query "SHOW TABLES FROM test"
-clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
-clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
-clickhouse-client --query "SHOW TABLES FROM test"

-if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test ; then
-    SKIP_LIST_OPT="--use-skip-list"
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    clickhouse-client --query "CREATE DATABASE test ON CLUSTER 'test_cluster_database_replicated'
+        ENGINE=Replicated('/test/clickhouse/db/test', '{shard}', '{replica}')"
+
+    clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1"
+    clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1"
+
+    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
+    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
+
+    clickhouse-client --query "DROP TABLE datasets.hits_v1"
+    clickhouse-client --query "DROP TABLE datasets.visits_v1"
+
+    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
+else
+    clickhouse-client --query "CREATE DATABASE test"
+    clickhouse-client --query "SHOW TABLES FROM test"
+    clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
+    clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 fi

-# We can have several additional options so we path them as array because it's
-# more idiologically correct.
-read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
-
-clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+clickhouse-client --query "SHOW TABLES FROM test"
+clickhouse-client --query "SELECT count() FROM test.hits"
+clickhouse-client --query "SELECT count() FROM test.visits"
+
+function run_tests()
+{
+    set -x
+    # We can have several additional options, so we pass them as an array
+    # because it's more ideologically correct.
+    read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--replicated-database')
+    fi
+
+    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --use-skip-list --print-time "${ADDITIONAL_OPTIONS[@]}" \
+        "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+}
+
+export -f run_tests
+timeout "$MAX_RUN_TIME" bash -c run_tests ||:

 ./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

@@ -73,3 +117,9 @@ mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
 if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
     tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
 fi
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
+    pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
+    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
+    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+fi
@@ -12,6 +12,8 @@ UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"

+NO_TASK_TIMEOUT_SIGN = "All tests have finished"
+
 def process_test_log(log_path):
     total = 0
     skipped = 0
@@ -19,10 +21,13 @@ def process_test_log(log_path):
     failed = 0
     success = 0
     hung = False
+    task_timeout = True
     test_results = []
     with open(log_path, 'r') as test_file:
         for line in test_file:
             line = line.strip()
+            if NO_TASK_TIMEOUT_SIGN in line:
+                task_timeout = False
             if HUNG_SIGN in line:
                 hung = True
             if any(sign in line for sign in (OK_SIGN, FAIL_SING, UNKNOWN_SIGN, SKIPPED_SIGN)):
@@ -52,7 +57,7 @@ def process_test_log(log_path):
             else:
                 success += int(OK_SIGN in line)
                 test_results.append((test_name, "OK", test_time))
-    return total, skipped, unknown, failed, success, hung, test_results
+    return total, skipped, unknown, failed, success, hung, task_timeout, test_results

 def process_result(result_path):
     test_results = []
@@ -68,7 +73,7 @@ def process_result(result_path):
         state = "error"

     if result_path and os.path.exists(result_path):
-        total, skipped, unknown, failed, success, hung, test_results = process_test_log(result_path)
+        total, skipped, unknown, failed, success, hung, task_timeout, test_results = process_test_log(result_path)
         is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1))
         # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
         # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
@@ -78,6 +83,9 @@ def process_result(result_path):
         if hung:
             description = "Some queries hung, "
             state = "failure"
+        elif task_timeout:
+            description = "Timeout, "
+            state = "failure"
         else:
             description = ""
@@ -34,23 +34,44 @@ if [ "$NUM_TRIES" -gt "1" ]; then

     # simplest way to forward env variables to server
     sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
-    sleep 5
 else
-    service clickhouse-server start && sleep 5
+    service clickhouse-server start
 fi

-if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
-    SKIP_LIST_OPT="--use-skip-list"
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+
+    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
+        -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
+        --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
+        --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
+        --mysql_port 19004 \
+        --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
+        --macros.replica r2   # It doesn't work :(
+
+    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
+        -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
+        --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
+        --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
+        --mysql_port 29004 \
+        --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
+        --macros.shard s2   # It doesn't work :(
+
+    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
 fi

+sleep 5
+
+function run_tests()
+{
+    set -x
+    # We can have several additional options, so we pass them as an array
+    # because it's more ideologically correct.
+    read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+    # Skip these tests, because they fail when we rerun them multiple times
+    if [ "$NUM_TRIES" -gt "1" ]; then
+        ADDITIONAL_OPTIONS+=('--order=random')
+        ADDITIONAL_OPTIONS+=('--skip')
+        ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
+        ADDITIONAL_OPTIONS+=('--jobs')
@@ -62,8 +83,7 @@ function run_tests()
     fi

     clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
-        --test-runs "$NUM_TRIES" \
-        "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        --use-skip-list --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
 }
@@ -74,10 +94,23 @@ timeout "$MAX_RUN_TIME" bash -c run_tests ||:

 ./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

-pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||:
+clickhouse-client -q "system flush logs" ||:
+
+pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
+clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
+clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
+wait ||:
+
 mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
 if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
     tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
 fi
+tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
+tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
+    pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
+    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
+    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+fi
@@ -69,7 +69,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([

 - MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.

-- MySQl `DELETE` query is converted into `INSERT` with `_sign=-1`.
+- MySQL `DELETE` query is converted into `INSERT` with `_sign=-1`.

 - MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.
@@ -3,7 +3,7 @@ toc_priority: 8
 toc_title: PostgreSQL
 ---

-# PosgtreSQL {#postgresql}
+# PostgreSQL {#postgresql}

 The PostgreSQL engine allows you to perform `SELECT` queries on data that is stored on a remote PostgreSQL server.
@@ -769,6 +769,38 @@ Example:
 log_query_threads=1
 ```

+## log_comment {#settings-log-comment}
+
+Specifies the value for the `log_comment` field of the [system.query_log](../system-tables/query_log.md) table and the comment text for the server log.
+
+It can be used to improve the readability of server logs. Additionally, it helps to select queries related to a test from `system.query_log` after running [clickhouse-test](../../development/tests.md).
+
+Possible values:
+
+- Any string no longer than [max_query_size](#settings-max_query_size). If the length is exceeded, the server throws an exception.
+
+Default value: empty string.
+
+**Example**
+
+Query:
+
+``` sql
+SET log_comment = 'log_comment test', log_queries = 1;
+SELECT 1;
+SYSTEM FLUSH LOGS;
+SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test' AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 2;
+```
+
+Result:
+
+``` text
+┌─type────────┬─query─────┐
+│ QueryStart  │ SELECT 1; │
+│ QueryFinish │ SELECT 1; │
+└─────────────┴───────────┘
+```
+
 ## max_insert_block_size {#settings-max_insert_block_size}

 The size of blocks (in a count of rows) to form for insertion into a table.
@@ -1514,6 +1546,14 @@ FORMAT PrettyCompactMonoBlock

 Default value: 0

+## optimize_skip_unused_shards_limit {#optimize-skip-unused-shards-limit}
+
+Limit for the number of sharding key values; turns off `optimize_skip_unused_shards` if the limit is reached.
+
+Too many values may require a significant amount of processing, while the benefit is doubtful: with a huge number of values in `IN (...)`, the query will most likely be sent to all shards anyway.
+
+Default value: 1000
+
 ## optimize_skip_unused_shards {#optimize-skip-unused-shards}

 Enables or disables skipping of unused shards for [SELECT](../../sql-reference/statements/select/index.md) queries that have sharding key condition in `WHERE/PREWHERE` (assuming that the data is distributed by sharding key, otherwise does nothing).
@@ -2728,11 +2768,11 @@ Default value: `0`.

 ## engine_file_truncate_on_insert {#engine-file-truncate-on-insert}

-Enables or disables truncate before insert in file engine tables.
+Enables or disables truncate before insert in [File](../../engines/table-engines/special/file.md) engine tables.

 Possible values:
-- 0 — Disabled.
-- 1 — Enabled.
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` replaces existing content of the file with the new data.

 Default value: `0`.

@@ -2747,4 +2787,39 @@ Possible values:

 Default value: `0`.

+## allow_experimental_live_view {#allow-experimental-live-view}
+
+Allows creation of experimental [live views](../../sql-reference/statements/create/view.md#live-view).
+
+Possible values:
+
+- 0 — Working with live views is disabled.
+- 1 — Working with live views is enabled.
+
+Default value: `0`.
+
+## live_view_heartbeat_interval {#live-view-heartbeat-interval}
+
+Sets the heartbeat interval in seconds to indicate that a [live view](../../sql-reference/statements/create/view.md#live-view) is alive.
+
+Default value: `15`.
+
+## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}
+
+Sets the maximum number of inserted blocks after which mergeable blocks are dropped and the query for a [live view](../../sql-reference/statements/create/view.md#live-view) is re-executed.
+
+Default value: `64`.
+
+## temporary_live_view_timeout {#temporary-live-view-timeout}
+
+Sets the interval in seconds after which a [live view](../../sql-reference/statements/create/view.md#live-view) with timeout is deleted.
+
+Default value: `5`.
+
+## periodic_live_view_refresh {#periodic-live-view-refresh}
+
+Sets the interval in seconds after which a periodically refreshed [live view](../../sql-reference/statements/create/view.md#live-view) is forced to refresh.
+
+Default value: `60`.
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
@@ -243,7 +243,7 @@ The function works according to the algorithm:
 **Syntax**

 ``` sql
-windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
+windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
 ```

 **Arguments**
@@ -253,9 +253,11 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)

 **Parameters**

-- `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`.
-- `mode` — It is an optional argument.
-    - `'strict'` — When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values.
+- `window` — Length of the sliding window; the time interval between the first and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
+- `mode` — An optional argument. One or more modes can be set.
+    - `'strict'` — If the same condition holds for a sequence of events, such repeated (non-unique) events are skipped.
+    - `'strict_order'` — Don't allow interventions of other events. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
+    - `'strict_increase'` — Apply conditions only to events with strictly increasing timestamps.

 **Returned value**
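To make the window condition above concrete, here is a compact C++ sketch of the plain (no-mode) semantics: find the longest chain cond1..condK whose events appear in order with the whole chain inside `window`. The names and structure are mine for illustration, not the ClickHouse implementation:

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

// Returns the maximum level reached: the largest k such that events matching
// cond1..condk occur in timestamp order and fit within `window` seconds.
int windowFunnelBasic(std::vector<std::pair<uint64_t, int>> events, // (timestamp, 1-based condition index)
                      uint64_t window, int num_conds)
{
    std::sort(events.begin(), events.end());

    // chain_start[k] = earliest cond1 timestamp among chains that reached level k+1,
    // or "unset" if no such chain exists yet.
    const uint64_t unset = std::numeric_limits<uint64_t>::max();
    std::vector<uint64_t> chain_start(num_conds, unset);

    int best = 0;
    for (const auto & [ts, cond] : events)
    {
        if (cond == 1)
            chain_start[0] = std::min(chain_start[0], ts);
        else if (chain_start[cond - 2] != unset && ts <= chain_start[cond - 2] + window)
            chain_start[cond - 1] = chain_start[cond - 2]; // extend the chain

        if (chain_start[cond - 1] != unset)
            best = std::max(best, cond);
    }
    return best;
}
```

For example, events `{1, cond1}, {2, cond2}` with `window = 5` return level 2, while `{1, cond1}, {10, cond2}` return level 1 because the second condition falls outside the window.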
docs/en/sql-reference/functions/files.md (new file, 35 lines)

@@ -0,0 +1,35 @@
+---
+toc_priority: 43
+toc_title: Files
+---
+
+# Functions for Working with Files {#functions-for-working-with-files}
+
+## file {#file}
+
+Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
+
+**Syntax**
+
+``` sql
+file(path)
+```
+
+**Arguments**
+
+- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). The path supports the following wildcards: `*`, `?`, `{abc,def}` and `{N..M}`, where `N`, `M` are numbers and `'abc', 'def'` are strings.
+
+**Example**
+
+Inserting data from files a.txt and b.txt into a table as strings:
+
+Query:
+
+``` sql
+INSERT INTO table SELECT file('a.txt'), file('b.txt');
+```
+
+**See Also**
+
+- [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path)
+- [file](../table-functions/file.md)
@@ -394,3 +394,55 @@ Result:
 └──────────────────┴────────────────────┘
 ```

+## isIPAddressInRange {#isipaddressinrange}
+
+Determines if an IP address is contained in a network represented in the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation. Returns `1` if true, or `0` otherwise.
+
+**Syntax**
+
+``` sql
+isIPAddressInRange(address, prefix)
+```
+
+This function accepts both IPv4 and IPv6 addresses (and networks) represented as strings. It returns `0` if the IP version of the address and the CIDR don't match.
+
+**Arguments**
+
+- `address` — An IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md).
+- `prefix` — An IPv4 or IPv6 network prefix in CIDR. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- `1` or `0`.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8')
+```
+
+Result:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', '127.0.0.0/8')─┐
+│                                              1 │
+└────────────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16')
+```
+
+Result:
+
+``` text
+┌─isIPAddressInRange('127.0.0.1', 'ffff::/16')─┐
+│                                            0 │
+└──────────────────────────────────────────────┘
+```
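The underlying check is just a masked prefix comparison. A rough C++ sketch of the IPv4 case (my illustration, not the ClickHouse source; minimal error handling, POSIX `inet_pton`):

```cpp
#include <arpa/inet.h>
#include <cstdint>
#include <cstdio>
#include <string>

// IPv4-only sketch: parse "a.b.c.d/len" and test whether `address` falls
// inside the network by comparing the top `len` bits.
bool isIPv4AddressInRange(const std::string & address, const std::string & prefix)
{
    const auto slash = prefix.find('/');
    if (slash == std::string::npos)
        return false;

    in_addr addr{}, net{};
    if (inet_pton(AF_INET, address.c_str(), &addr) != 1
        || inet_pton(AF_INET, prefix.substr(0, slash).c_str(), &net) != 1)
        return false;

    const int len = std::stoi(prefix.substr(slash + 1));
    const uint32_t mask = len == 0 ? 0 : ~uint32_t{0} << (32 - len);
    return ((ntohl(addr.s_addr) ^ ntohl(net.s_addr)) & mask) == 0;
}

int main()
{
    std::printf("%d\n", isIPv4AddressInRange("127.0.0.1", "127.0.0.0/8"));    // 1
    std::printf("%d\n", isIPv4AddressInRange("127.0.0.1", "192.168.0.0/16")); // 0
}
```

The IPv6 case follows the same idea with a 128-bit mask; mixing versions simply fails the comparison, matching the documented behaviour of returning `0`.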
@@ -12,7 +12,9 @@ The search is case-sensitive by default in all these functions. There are separa

 ## position(haystack, needle), locate(haystack, needle) {#position}

-Returns the position (in bytes) of the found substring in the string, starting from 1.
+Searches for the substring `needle` in the string `haystack`.
+
+Returns the position (in bytes) of the found substring in the string, starting from 1.

 For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).

@@ -20,15 +22,22 @@ For a case-insensitive search, use the function [positionCaseInsensitive](#posit

 ``` sql
 position(haystack, needle[, start_pos])
 ```

+``` sql
+position(needle IN haystack)
+```
+
 Alias: `locate(haystack, needle[, start_pos])`.

+!!! note "Note"
+    The `position(needle IN haystack)` syntax provides SQL compatibility; the function works the same way as `position(haystack, needle)`.
+
 **Arguments**

 - `haystack` — String in which the substring is searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
 - `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
-- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md).
+- `start_pos` — Position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md). Optional.

 **Returned values**

@@ -83,6 +92,36 @@ Result:
 └───────────────────────────────┘
 ```

+**Examples for POSITION(needle IN haystack) syntax**
+
+Query:
+
+```sql
+SELECT 3 = position('c' IN 'abc');
+```
+
+Result:
+
+```text
+┌─equals(3, position('abc', 'c'))─┐
+│                               1 │
+└─────────────────────────────────┘
+```
+
+Query:
+
+```sql
+SELECT 6 = position('/' IN s) FROM (SELECT 'Hello/World' AS s);
+```
+
+Result:
+
+```text
+┌─equals(6, position(s, '/'))─┐
+│                           1 │
+└─────────────────────────────┘
+```
+
 ## positionCaseInsensitive {#positioncaseinsensitive}

 The same as [position](#position) returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search.

@@ -772,4 +811,3 @@ Result:
 │                             2 │
 └───────────────────────────────┘
 ```
-
@@ -144,7 +144,7 @@ This query changes the `name` column properties:

 - TTL

-    For examples of columns TTL modifying, see [Column TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl).
+    For examples of columns TTL modifying, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).

 If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
@@ -68,7 +68,7 @@ To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop

 !!! important "Important"
     This is an experimental feature that may change in backwards-incompatible ways in the future releases.
-    Enable usage of live views and `WATCH` query using `set allow_experimental_live_view = 1`.
+    Enable usage of live views and `WATCH` query using the [allow_experimental_live_view](../../../operations/settings/settings.md#allow-experimental-live-view) setting. Input the command `set allow_experimental_live_view = 1`.


 ```sql
@@ -90,7 +90,9 @@ Live views work similarly to how a query in a distributed table works. But inste

 See [WITH REFRESH](#live-view-with-refresh) to force periodic updates of a live view that in some cases can be used as a workaround.

-You can watch for changes in the live view query result using the [WATCH](../../../sql-reference/statements/watch.md) query
+### Monitoring Changes {#live-view-monitoring}
+
+You can monitor changes in the `LIVE VIEW` query result using the [WATCH](../../../sql-reference/statements/watch.md) query.

 ```sql
 WATCH [db.]live_view
@@ -102,11 +104,10 @@ WATCH [db.]live_view
 CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
 CREATE LIVE VIEW lv AS SELECT sum(x) FROM mt;
 ```

 Watch a live view while doing a parallel insert into the source table.

 ```sql
-WATCH lv
+WATCH lv;
 ```

 ```bash
@@ -128,16 +129,16 @@ INSERT INTO mt VALUES (2);
 INSERT INTO mt VALUES (3);
 ```

-or add [EVENTS](../../../sql-reference/statements/watch.md#events-clause) clause to just get change events.
+Or add [EVENTS](../../../sql-reference/statements/watch.md#events-clause) clause to just get change events.

 ```sql
-WATCH [db.]live_view EVENTS
+WATCH [db.]live_view EVENTS;
 ```

 **Example:**

 ```sql
-WATCH lv EVENTS
+WATCH lv EVENTS;
 ```

 ```bash
@@ -163,15 +164,15 @@ SELECT * FROM [db.]live_view WHERE ...

 You can force live view refresh using the `ALTER LIVE VIEW [db.]table_name REFRESH` statement.

-### With Timeout {#live-view-with-timeout}
+### WITH TIMEOUT Clause {#live-view-with-timeout}

-When a live view is create with a `WITH TIMEOUT` clause then the live view will be dropped automatically after the specified number of seconds elapse since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching the live view.
+When a live view is created with a `WITH TIMEOUT` clause then the live view will be dropped automatically after the specified number of seconds elapse since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching the live view.

 ```sql
 CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
 ```

-If the timeout value is not specified then the value specified by the `temporary_live_view_timeout` setting is used.
+If the timeout value is not specified then the value specified by the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.

 **Example:**

@@ -180,7 +181,7 @@ CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
 CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
 ```

-### With Refresh {#live-view-with-refresh}
+### WITH REFRESH Clause {#live-view-with-refresh}

 When a live view is created with a `WITH REFRESH` clause then it will be automatically refreshed after the specified number of seconds elapse since the last refresh or trigger.

@@ -188,7 +189,7 @@ When a live view is created with a `WITH REFRESH` clause then it will be automat
 CREATE LIVE VIEW [db.]table_name WITH REFRESH [value_in_sec] AS SELECT ...
 ```

-If the refresh value is not specified then the value specified by the `periodic_live_view_refresh` setting is used.
+If the refresh value is not specified then the value specified by the [periodic_live_view_refresh](../../../operations/settings/settings.md#periodic-live-view-refresh) setting is used.

 **Example:**

@@ -231,7 +232,7 @@ WATCH lv
 Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv doesn't exist..
 ```

-### Usage
+### Usage {#live-view-usage}

 Most common uses of live view tables include:

@@ -240,15 +241,4 @@ Most common uses of live view tables include:
 - Watching for table changes and triggering a follow-up select queries.
 - Watching metrics from system tables using periodic refresh.

-### Settings {#live-view-settings}
-
-You can use the following settings to control the behaviour of live views.
-
-- `allow_experimental_live_view` - enable live views. Default is `0`.
-- `live_view_heartbeat_interval` - the heartbeat interval in seconds to indicate live query is alive. Default is `15` seconds.
-- `max_live_view_insert_blocks_before_refresh` - maximum number of inserted blocks after which
-  mergeable blocks are dropped and query is re-executed. Default is `64` inserts.
-- `temporary_live_view_timeout` - interval after which live view with timeout is deleted. Default is `5` seconds.
-- `periodic_live_view_refresh` - interval after which periodically refreshed live view is forced to refresh. Default is `60` seconds.
-
 [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/) <!--hide-->
@@ -17,19 +17,21 @@ WATCH [db.]live_view
     [FORMAT format]
 ```

-The `WATCH` query performs continuous data retrieval from a [live view](./create/view.md#live-view) table. Unless the `LIMIT` clause is specified it provides an infinite stream of query results from a [live view](./create/view.md#live-view).
+The `WATCH` query performs continuous data retrieval from a [LIVE VIEW](./create/view.md#live-view) table. Unless the `LIMIT` clause is specified it provides an infinite stream of query results from a [LIVE VIEW](./create/view.md#live-view).

 ```sql
-WATCH [db.]live_view
+WATCH [db.]live_view [EVENTS] [LIMIT n] [FORMAT format]
 ```

+## Virtual columns {#watch-virtual-columns}
+
 The virtual `_version` column in the query result indicates the current result version.

 **Example:**

 ```sql
 CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
-WATCH lv
+WATCH lv;
 ```

 ```bash
@@ -47,6 +49,8 @@ WATCH lv

 By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../sql-reference/statements/insert-into.md) it can be forwarded to a different table.

+**Example:**
+
 ```sql
 INSERT INTO [db.]table WATCH [db.]live_view ...
 ```
@@ -56,14 +60,14 @@ INSERT INTO [db.]table WATCH [db.]live_view ...
 The `EVENTS` clause can be used to obtain a short form of the `WATCH` query where instead of the query result you will just get the latest query result version.

 ```sql
-WATCH [db.]live_view EVENTS
+WATCH [db.]live_view EVENTS;
 ```

 **Example:**

 ```sql
 CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
-WATCH lv EVENTS
+WATCH lv EVENTS;
 ```

 ```bash
@@ -78,17 +82,17 @@ WATCH lv EVENTS

 ## LIMIT Clause {#limit-clause}

-The `LIMIT n` clause species the number of updates the `WATCH` query should wait for before terminating. By default there is no limit on the number of updates and therefore the query will not terminate. The value of `0` indicates that the `WATCH` query should not wait for any new query results and therefore will return immediately once query is evaluated.
+The `LIMIT n` clause specifies the number of updates the `WATCH` query should wait for before terminating. By default there is no limit on the number of updates and therefore the query will not terminate. The value of `0` indicates that the `WATCH` query should not wait for any new query results and therefore will return immediately once the query result is evaluated.

 ```sql
-WATCH [db.]live_view LIMIT 1
+WATCH [db.]live_view LIMIT 1;
 ```

 **Example:**

 ```sql
 CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
-WATCH lv EVENTS LIMIT 1
+WATCH lv EVENTS LIMIT 1;
 ```

 ```bash
@@ -102,5 +106,4 @@ WATCH lv EVENTS LIMIT 1
 The `FORMAT` clause works the same way as for the [SELECT](../../sql-reference/statements/select/format.md#format-clause).

 !!! info "Note"
-    The [JSONEachRowWithProgress](../../interfaces/formats/#jsoneachrowwithprogress) format should be used when watching [live view](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.
-
+    The [JSONEachRowWithProgress](../../interfaces/formats.md#jsoneachrowwithprogress) format should be used when watching [LIVE VIEW](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.
@ -759,6 +759,38 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
|
||||
log_query_threads=1
|
||||
```
|
||||
|
||||
## log_comment {#settings-log-comment}
|
||||
|
||||
Задаёт значение поля `log_comment` таблицы [system.query_log](../system-tables/query_log.md) и текст комментария в логе сервера.
|
||||
|
||||
Может быть использована для улучшения читабельности логов сервера. Кроме того, помогает быстро выделить связанные с тестом запросы из `system.query_log` после запуска [clickhouse-test](../../development/tests.md).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- Любая строка не длиннее [max_query_size](#settings-max_query_size). При превышении длины сервер сгенерирует исключение.
|
||||
|
||||
Значение по умолчанию: пустая строка.
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SET log_comment = 'log_comment test', log_queries = 1;
|
||||
SELECT 1;
|
||||
SYSTEM FLUSH LOGS;
|
||||
SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test' AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 2;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─type────────┬─query─────┐
|
||||
│ QueryStart │ SELECT 1; │
|
||||
│ QueryFinish │ SELECT 1; │
|
||||
└─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## max_insert_block_size {#settings-max_insert_block_size}
|
||||
|
||||
Формировать блоки указанного размера, при вставке в таблицу.
|
||||
@ -2615,14 +2647,68 @@ SELECT * FROM test2;

Note that this setting affects the behavior of [materialized views](../../sql-reference/statements/create/view.md#materialized) and [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md) databases.

## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}

Enables or disables the ability to run a `SELECT` query against a [File](../../engines/table-engines/special/file.md) engine table that does not contain a file.

Possible values:
- 0 — the `SELECT` query throws an exception.
- 1 — the `SELECT` query returns an empty result.

Default value: `0`.

## engine_file_truncate_on_insert {#engine-file-truncate-on-insert}

Enables or disables truncating the data in a [File](../../engines/table-engines/special/file.md) engine table before an insert.

Possible values:
- 0 — an `INSERT` query appends data to the end of the file.
- 1 — an `INSERT` query deletes the existing data in the file and replaces it with the new data.

Default value: `0`.
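
Taken together, a minimal sketch of how these two File-engine settings behave; the table name `file_demo` is hypothetical:

``` sql
-- Hypothetical demo table backed by the File engine.
CREATE TABLE file_demo (s String) ENGINE = File(TSV);

SET engine_file_empty_if_not_exists = 1; -- SELECT from a missing file returns an empty set
SELECT * FROM file_demo;

SET engine_file_truncate_on_insert = 1;  -- each INSERT now replaces the file contents
INSERT INTO file_demo VALUES ('first batch');
INSERT INTO file_demo VALUES ('second batch');
SELECT * FROM file_demo;                 -- returns only 'second batch'
```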

## allow_experimental_geo_types {#allow-experimental-geo-types}

Allows the use of experimental data types for working with [geographical structures](../../sql-reference/data-types/geo.md).

Possible values:

- 0 — working with geographical structures is not supported.
- 1 — working with geographical structures is supported.

Default value: `0`.

## allow_experimental_live_view {#allow-experimental-live-view}

Enables the experimental [live view](../../sql-reference/statements/create/view.md#live-view) feature.

Possible values:
- 0 — live views are not supported.
- 1 — live views are supported.

Default value: `0`.
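
For example, a minimal sketch of enabling the feature for the current session, mirroring the live view examples later in this document (table and view names are arbitrary):

``` sql
SET allow_experimental_live_view = 1;
CREATE TABLE lv_src (x Int8) ENGINE = MergeTree ORDER BY x;
CREATE LIVE VIEW lv_demo AS SELECT sum(x) FROM lv_src;
```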

## live_view_heartbeat_interval {#live-view-heartbeat-interval}

Sets the interval in seconds for the periodic check that a [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) is alive.

Default value: `15`.

## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}

Sets the maximum number of inserted blocks after which the query that builds a [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) is re-executed.

Default value: `64`.

## temporary_live_view_timeout {#temporary-live-view-timeout}

Sets the time in seconds after which a temporary [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) is deleted.

Default value: `5`.

## periodic_live_view_refresh {#periodic-live-view-refresh}

Sets the interval in seconds after which a [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) with periodic refresh enabled is refreshed.

Default value: `60`.
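
A short sketch of how this setting acts as the default interval when `WITH REFRESH` is used without an explicit value (the view name is arbitrary):

``` sql
SET allow_experimental_live_view = 1;
SET periodic_live_view_refresh = 10;
-- With no explicit interval, WITH REFRESH falls back to periodic_live_view_refresh.
CREATE LIVE VIEW lv_periodic WITH REFRESH AS SELECT now();
```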

[Original article](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->

@ -243,7 +243,7 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t

**Syntax**

``` sql
windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
```

@ -254,7 +254,10 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)

**Parameters**

- `window` — the width of the sliding time window. The unit depends on `timestamp` and can vary. The condition `timestamp of cond2 <= timestamp of cond1 + window` must hold.
- `mode` — an optional parameter. Several values can be set at the same time (see the sketch after this list).
    - `'strict'` — do not count consecutive repeated events.
    - `'strict_order'` — forbids extraneous events in the searched sequence. For example, when searching for the chain `A->B->C` in `A->B->D->C`, the search stops at `D` and the function returns 2.
    - `'strict_increase'` — the conditions apply only to events with strictly increasing timestamps.
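
A hedged sketch of the funnel search described above; the `events` table and its columns are hypothetical:

``` sql
-- Count how far each user gets through visit -> cart -> purchase within a
-- 6000-unit window; 'strict' ignores consecutive repeated events.
SELECT
    user_id,
    windowFunnel(6000, 'strict')(timestamp, event = 'visit', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id;
```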

**Returned value**

33 docs/ru/sql-reference/functions/files.md Normal file
@ -0,0 +1,33 @@

---
toc_priority: 43
toc_title: "Functions for working with files"
---

# Functions for working with files {#funktsii-dlia-raboty-s-failami}

## file {#file}

Reads a file as a string. The file contents are not parsed; they are written into the specified column as a single string.

**Syntax**

``` sql
file(path)
```

**Arguments**

- `path` — the path to the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). The file path may include the following wildcards and patterns: `*`, `?`, `{abc,def}`, and `{N..M}`, where `N`, `M` are numbers and `'abc', 'def'` are strings.

**Examples**

Inserting data from the files a.txt and b.txt into a table as strings:

``` sql
INSERT INTO table SELECT file('a.txt'), file('b.txt');
```

**See also**

- [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path)
- [file](../table-functions/file.md)
@ -395,3 +395,54 @@ SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0
└──────────────────┴────────────────────┘
```

## isIPAddressInRange {#isipaddressinrange}

Checks whether an IP address falls within an interval given in [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation.

**Syntax**

``` sql
isIPAddressInRange(address, prefix)
```

The function accepts an IPv4 or IPv6 address as a string. It returns `0` if the IP versions of the address and the interval do not match.

**Arguments**

- `address` — an IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md).
- `prefix` — an IPv4 or IPv6 subnet in CIDR notation. [String](../../sql-reference/data-types/string.md).

**Returned value**

- `1` or `0`.

Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Examples**

Query:

``` sql
SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8')
```

Result:

``` text
┌─isIPAddressInRange('127.0.0.1', '127.0.0.0/8')─┐
│                                              1 │
└────────────────────────────────────────────────┘
```

Query:

``` sql
SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16')
```

Result:

``` text
┌─isIPAddressInRange('127.0.0.1', 'ffff::/16')─┐
│                                            0 │
└──────────────────────────────────────────────┘
```

@ -672,7 +672,7 @@ neighbor(column, offset[, default_value])

The function can access a value in a neighboring row only within the currently processed data block.

The row order used during the calculation of `neighbor` may differ from the order of rows returned to the user.
To prevent that, you can make a subquery with [ORDER BY](../../sql-reference/statements/select/order-by.md) and call the function from outside the subquery.

**Arguments**

@ -7,7 +7,7 @@ toc_title: "String search functions"

In all the functions below, the search is case-sensitive by default. There are variants of the functions for case-insensitive search.

## position(haystack, needle), locate(haystack, needle) {#position}

Searches for the substring `needle` in the string `haystack`.

@ -21,8 +21,15 @@ toc_title: "String search functions"
position(haystack, needle[, start_pos])
```

``` sql
position(needle IN haystack)
```

Alias: `locate(haystack, needle[, start_pos])`.

!!! note "Note"
    The `position(needle IN haystack)` syntax provides SQL compatibility; the function works the same way as `position(haystack, needle)`.

**Arguments**

- `haystack` — the string in which the search is performed. [String](../syntax.md#syntax-string-literal).

@ -70,6 +77,36 @@ SELECT position('Привет, мир!', '!');
└───────────────────────────────┘
```

**Examples for the POSITION(needle IN haystack) syntax**

Query:

```sql
SELECT 1 = position('абв' IN 'абв');
```

Result:

```text
┌─equals(1, position('абв', 'абв'))─┐
│                                 1 │
└───────────────────────────────────┘
```

Query:

```sql
SELECT 0 = position('абв' IN '');
```

Result:

```text
┌─equals(0, position('', 'абв'))─┐
│                              1 │
└────────────────────────────────┘
```

## positionCaseInsensitive {#positioncaseinsensitive}

The same as [position](#position), but case-insensitive. Returns the position (in bytes) of the found substring in the string, starting from 1.

@ -758,4 +795,3 @@ SELECT countSubstringsCaseInsensitiveUTF8('аБв__АбВ__абв', 'Абв');
│                                                          3 │
└────────────────────────────────────────────────────────────┘
```

@ -13,7 +13,7 @@ toc_title: "View"

CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ...
```

Normal views do not store any data; they read the data from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.

For example, suppose you have created a view:

@ -43,12 +43,12 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na

When creating a materialized view without `TO [db].[table]`, you must specify `ENGINE`, the table engine used to store the data.

When creating a materialized view with `TO [db].[table]`, you must not specify `POPULATE`.

A materialized view works as follows: when data is inserted into the table specified in its SELECT, a batch of the inserted data is transformed by this SELECT query, and the result is inserted into the view.

!!! important "Important"
    Materialized views in ClickHouse are more like `after insert` triggers. If the materialized view query contains an aggregation, it is applied only to the batch of freshly inserted records. Any changes to the existing data of the source table (such as updates, deletes, or dropping a partition) do not change the materialized view.

If `POPULATE` is specified, the existing table data is inserted into the view when it is created, as if a `CREATE TABLE ... AS SELECT ...` query had been run. Otherwise, the view contains only the data inserted into the table after the view was created. Using POPULATE is not recommended, since data inserted into the table during view creation will not appear in it.

@ -56,9 +56,177 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na

`ALTER` queries on materialized views are not fully implemented, so they can be inconvenient to use. If a materialized view uses the `TO [db.]name` construction, you can `DETACH` the view, run `ALTER` on the target table, and then `ATTACH` the previously detached (`DETACH`) view.

Note that a materialized view is affected by the [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged before being inserted into the view.

Views look the same as normal tables. For example, they are listed in the result of a `SHOW TABLES` query.

To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). However, `DROP TABLE` also works for views.
## LIVE views {#live-view}

!!! important "Important"
    `LIVE VIEW` is an experimental feature. Using it may cause compatibility issues in future releases.
    To use `LIVE VIEW` and `WATCH` queries, enable the [allow_experimental_live_view](../../../operations/settings/settings.md#allow-experimental-live-view) setting.

```sql
CREATE LIVE VIEW [IF NOT EXISTS] [db.]table_name [WITH [TIMEOUT [value_in_sec] [AND]] [REFRESH [value_in_sec]]] AS SELECT ...
```

A `LIVE VIEW` stores the result of the [SELECT](../../../sql-reference/statements/select/index.md) query specified at creation time and is updated as soon as that result changes. The final query result and the intermediate data from which the result is built are kept in memory, which provides high processing speed for repeated queries. LIVE views can send push notifications when the result of the underlying `SELECT` query changes. To receive them, use the [WATCH](../../../sql-reference/statements/watch.md) query.

A `LIVE VIEW` update is triggered by inserting data into the table specified in the underlying `SELECT` query.

LIVE views work on the same principle as distributed tables. But instead of combining separate parts of data from different servers, a LIVE view combines the already computed result with new data. If the underlying query of a LIVE view contains a nested subquery, its results are not cached; only the result of the main query is kept in the cache.

!!! info "Limitations"
    - [Table functions](../../../sql-reference/table-functions/index.md) are not supported in the main query.
    - Tables that cannot be modified with an `INSERT` query, such as [dictionaries](../../../sql-reference/dictionaries/index.md) and [system tables](../../../operations/system-tables/index.md), as well as [normal views](#normal) or [materialized views](#materialized), do not trigger a LIVE view update.
    - A LIVE view can only use queries that combine results over old and new data. LIVE views do not work with queries that require a full recomputation of the data or stateful aggregation.
    - `LIVE VIEW` does not work for replicated and distributed tables where data is inserted on different nodes.
    - `LIVE VIEW` is not updated if the underlying query uses multiple tables.

In cases where a `LIVE VIEW` is not updated automatically, use [WITH REFRESH](#live-view-with-refresh) to refresh it forcibly at a given interval.

### Monitoring changes {#live-view-monitoring}

To monitor changes of a LIVE view, use the [WATCH](../../../sql-reference/statements/watch.md) query.

**Example:**

```sql
CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
CREATE LIVE VIEW lv AS SELECT sum(x) FROM mt;
```

Watch the LIVE view change as data is inserted into the source table.

```sql
WATCH lv;
```

```bash
┌─sum(x)─┬─_version─┐
│      1 │        1 │
└────────┴──────────┘
┌─sum(x)─┬─_version─┐
│      2 │        2 │
└────────┴──────────┘
┌─sum(x)─┬─_version─┐
│      6 │        3 │
└────────┴──────────┘
...
```

```sql
INSERT INTO mt VALUES (1);
INSERT INTO mt VALUES (2);
INSERT INTO mt VALUES (3);
```

To get the list of changes, use the [EVENTS](../../../sql-reference/statements/watch.md#events-clause) keyword.

```sql
WATCH lv EVENTS;
```

```bash
┌─version─┐
│       1 │
└─────────┘
┌─version─┐
│       2 │
└─────────┘
┌─version─┐
│       3 │
└─────────┘
...
```

LIVE views, like any others, can be queried with [SELECT](../../../sql-reference/statements/select/index.md). If the query result is cached, it is returned immediately, without querying the view's source tables.

```sql
SELECT * FROM [db.]live_view WHERE ...
```

### Forced refresh {#live-view-alter-refresh}

To refresh a LIVE view forcibly, use the `ALTER LIVE VIEW [db.]table_name REFRESH` query.
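
For instance, reusing the `lv` view from the monitoring example above:

```sql
ALTER LIVE VIEW lv REFRESH;
```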

### The WITH TIMEOUT clause {#live-view-with-timeout}

A LIVE view created with the `WITH TIMEOUT` parameter is deleted automatically after the specified number of seconds have passed since the last [WATCH](../../../sql-reference/statements/watch.md) query applied to this LIVE view.

```sql
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
```

If the time interval is not specified, the value of the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.

**Example:**

```sql
CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
```

### The WITH REFRESH clause {#live-view-with-refresh}

A LIVE view created with the `WITH REFRESH` parameter is refreshed automatically at the specified interval, counted from the moment of the last refresh.

```sql
CREATE LIVE VIEW [db.]table_name WITH REFRESH [value_in_sec] AS SELECT ...
```

If the interval value is not specified, the value of [periodic_live_view_refresh](../../../operations/settings/settings.md#periodic-live-view-refresh) is used.

**Example:**

```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv;
```

```bash
┌───────────────now()─┬─_version─┐
│ 2021-02-21 08:47:05 │        1 │
└─────────────────────┴──────────┘
┌───────────────now()─┬─_version─┐
│ 2021-02-21 08:47:10 │        2 │
└─────────────────────┴──────────┘
┌───────────────now()─┬─_version─┐
│ 2021-02-21 08:47:15 │        3 │
└─────────────────────┴──────────┘
```

The `WITH TIMEOUT` and `WITH REFRESH` parameters can be combined using `AND`.

```sql
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AND REFRESH [value_in_sec] AS SELECT ...
```

**Example:**

```sql
CREATE LIVE VIEW lv WITH TIMEOUT 15 AND REFRESH 5 AS SELECT now();
```

After 15 seconds the view is deleted automatically if there is no active `WATCH` query.

```sql
WATCH lv;
```

```
Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv doesn't exist..
```

### Usage {#live-view-usage}

The most common use cases for `LIVE VIEW`:

- Receiving push notifications about data changes without extra periodic queries.
- Caching the results of frequently used queries to get them without delay.
- Monitoring table changes to trigger other `SELECT` queries.
- Monitoring metrics from system tables with periodic refreshes.

[Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/view) <!--hide-->

106 docs/ru/sql-reference/statements/watch.md Normal file
@ -0,0 +1,106 @@

---
toc_priority: 53
toc_title: WATCH
---

# WATCH Query {#watch}

!!! important "Important"
    This is an experimental feature. It may cause compatibility issues in future releases.
    To use `LIVE VIEW` and `WATCH` queries, enable the setting with `set allow_experimental_live_view = 1`.

**Syntax**

``` sql
WATCH [db.]live_view [EVENTS] [LIMIT n] [FORMAT format]
```

The `WATCH` query continuously returns the contents of a [LIVE view](./create/view.md#live-view). If the `LIMIT` parameter is not set, the `WATCH` query keeps updating the contents of the [LIVE view](./create/view.md#live-view) indefinitely.

```sql
WATCH [db.]live_view;
```

## Virtual columns {#watch-virtual-columns}

The virtual `_version` column in the query result indicates the version of the result.

**Example:**

```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv;
```

```bash
┌───────────────now()─┬─_version─┐
│ 2021-02-21 09:17:21 │        1 │
└─────────────────────┴──────────┘
┌───────────────now()─┬─_version─┐
│ 2021-02-21 09:17:26 │        2 │
└─────────────────────┴──────────┘
┌───────────────now()─┬─_version─┐
│ 2021-02-21 09:17:31 │        3 │
└─────────────────────┴──────────┘
...
```

By default, the requested data is returned to the client, but combined with an [INSERT INTO](../../sql-reference/statements/insert-into.md) query it can be redirected for insertion into another table.

**Example:**

```sql
INSERT INTO [db.]table WATCH [db.]live_view ...
```

## The EVENTS clause {#events-clause}

With the `EVENTS` parameter you can get a compact form of the `WATCH` query result. Instead of the full result, you receive the number of the latest result version.

```sql
WATCH [db.]live_view EVENTS;
```

**Example:**

```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv EVENTS;
```

```bash
┌─version─┐
│       1 │
└─────────┘
┌─version─┐
│       2 │
└─────────┘
...
```

## The LIMIT clause {#limit-clause}

The `LIMIT n` parameter sets the number of updates after which the `WATCH` query stops. By default this number is not set, so the query runs indefinitely. `LIMIT 0` means the `WATCH` query returns a single up-to-date query result and stops.

```sql
WATCH [db.]live_view LIMIT 1;
```

**Example:**

```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv EVENTS LIMIT 1;
```

```bash
┌─version─┐
│       1 │
└─────────┘
```

## The FORMAT clause {#format-clause}

The `FORMAT` parameter works the same way as the parameter of the same name in the [SELECT](../../sql-reference/statements/select/format.md#format-clause) query.

!!! info "Note"
    The [JSONEachRowWithProgress](../../interfaces/formats.md#jsoneachrowwithprogress) format should be used when watching a [LIVE VIEW](./create/view.md#live-view) over the HTTP interface. Progress messages are added to the output stream to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled by the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.
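
For instance, a minimal sketch combining the clauses from this page for the HTTP use case described in the note:

```sql
WATCH lv LIMIT 1 FORMAT JSONEachRowWithProgress;
```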

@ -23,7 +23,6 @@ nltk==3.5
nose==1.3.7
protobuf==3.14.0
numpy==1.19.2
Pygments==2.5.2
pymdown-extensions==8.0
python-slugify==4.0.1
PyYAML==5.4.1
@ -36,3 +35,4 @@ termcolor==1.1.0
tornado==6.1
Unidecode==1.1.1
urllib3==1.25.10
Pygments>=2.7.4
38 docs/zh/faq/terms_translation_zh.md Normal file
@ -0,0 +1,38 @@

# Terminology translation conventions
This document maintains the glossary of terms translated from English into Chinese.

## Keep in English, do not translate
Parquet

## English <-> Chinese
Integer 整数
floating-point 浮点数
Fitting 拟合
Decimal 定点数
Tuple 元组
function 函数
array 数组/阵列
hash 哈希/散列
Parameters 参数
Arguments 参数

## Notes
1. For the translation of "array", keep the original rendering 数组/阵列 unchanged.

2. For inverted sentences, the translation is not literal; the word order is adjusted.
   For example, in the translation of groupArrayInsertAt:

``` text
- `x` — [Expression] resulting in one of the [supported data types].
```

``` text
`x` — 生成所[支持的数据类型](数据)的[表达式]。
```

3. See also 参见
@ -1,7 +1,5 @@
---
toc_folder_title: Getting started
toc_priority: 2
---

@ -9,7 +7,7 @@ toc_priority: 2

If you are new to ClickHouse and want to get a hands-on feel for its performance,

you first need to complete the [installation and deployment](install.md).

After that, you can take your first steps with the tutorials and example datasets:
@ -238,6 +238,6 @@ FROM
```

!!! note "Note"
    See the descriptions of the [avg()](../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg) and [log()](../sql-reference/functions/math-functions.md) functions.

[Original article](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) <!--hide-->
@ -1,5 +1,5 @@
---
toc_folder_title: Introduction
toc_priority: 1
---
@ -988,15 +988,15 @@ ClickHouse throws an exception

## count_distinct_implementation {#settings-count_distinct_implementation}

Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) construction.

Possible values:

- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

Default value: `uniqExact`.
@ -1,24 +1,8 @@
# Usage Recommendations {#usage-recommendations}

## CPU {#cpu}

The SSE4.2 instruction set must be supported. Modern processors (since 2008) support it.

When choosing a processor, prefer a large number of cores at a slightly lower clock rate over fewer cores at a higher clock rate.
For example, 16 cores at 2600 MHz are better than 8 cores at 3600 MHz.

## Hyper-threading {#hyper-threading}

Do not disable hyper-threading. It helps some queries but not others.

## Turbo Boost {#turbo-boost}

Turbo Boost is highly recommended. It significantly improves performance under typical load.
You can use `turbostat` to view the CPU's actual clock rate under load.

## CPU scaling governor {#cpu-scaling-governor}

Always use the `performance` scaling governor. The `on-demand` governor performs much worse under constantly high demand.

``` bash
echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
```
@ -26,68 +10,70 @@ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_gover

## CPU limitations {#cpu-limitations}

Processors can overheat. Use `dmesg` to see if the CPU's clock rate was limited due to overheating.
The limit can also be set externally at the data center level. You can use `turbostat` to monitor it under load.

## RAM {#ram}

For small amounts of data (up to ~200 GB compressed), it is best to use as much memory as you have data.
For large amounts of data, and when processing interactive (online) queries, you should use a reasonable amount of RAM (128 GB or more) so that the hot data subset fits in the page cache.
Even for data volumes of ~50 TB per server, using 128 GB of RAM significantly improves query performance compared to 64 GB.

## Swap file {#swap-file}

Always disable the swap file. The only reason not to do this is if you are running ClickHouse on your personal laptop.

Do not disable overcommit. The value of `cat /proc/sys/vm/overcommit_memory` should be 0 or 1. Run:

``` bash
$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
```

## Huge pages {#huge-pages}

Always disable transparent huge pages. They interfere with the memory allocator, which leads to significant performance degradation.

``` bash
echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```

Use `perf top` to watch the time the kernel spends on memory management.
Permanent huge pages also do not need to be allocated.

## Storage subsystem {#storage-subsystem}

If your budget allows you to use SSDs, use SSDs.
If not, use hard drives. SATA hard drives at 7200 RPM will do.

Prefer many servers with local hard drives over fewer servers with attached disk shelves.
But for storing archives that are rarely queried, shelves will work.

## RAID {#raid}

When using hard drives, you can combine them in RAID-10, RAID-5, RAID-6, or RAID-50.
For Linux, software RAID is better (with `mdadm`). We do not recommend using LVM.
When creating RAID-10, select the `far` layout.
If your budget allows, choose RAID-10.

If you have more than 4 disks, use RAID-6 (preferred) or RAID-50 instead of RAID-5.
When using RAID-5, RAID-6, or RAID-50, always increase stripe_cache_size, since the default value is usually not the best choice.

``` bash
echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
```

Calculate the exact number from the number of devices and the chunk size using the formula `2 * num_devices * chunk_size_in_bytes / 4096`. For example, with 8 devices and a 512 KB (524288-byte) chunk: 2 * 8 * 524288 / 4096 = 2048.

A chunk size of 1024 KB is sufficient for all RAID configurations.
Never set the chunk size too small or too large.

You can use RAID-0 on SSDs.
Whichever RAID you use, always use replication for data safety.

Enable NCQ with a long queue. For HDDs, choose the CFQ scheduler; for SSDs, choose noop. Do not reduce the `readahead` setting.
For HDDs, enable the write cache.

## File system {#file-system}

Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`.
XFS is also suitable, but it has not been as thoroughly tested with ClickHouse.
Most other file systems should also work fine. File systems with delayed allocation work better.

## Linux kernel {#linux-kernel}
@ -95,26 +81,43 @@ XFS is also suitable, but it has not been as thoroughly tested with ClickHouse.

## Network {#network}

If you are using IPv6, increase the size of the route cache.
Linux kernels before 3.2 had numerous problems with the IPv6 implementation.

If possible, use at least a 10 Gbit network. 1 Gbit will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.

## Hypervisor configuration

If you are using OpenStack, set

```
cpu_mode=host-passthrough
```

in nova.conf.

If you are using libvirt, set

```
<cpu mode='host-passthrough'/>
```

in the XML configuration.

This is important for ClickHouse to be able to get correct information with the `cpuid` instruction.
Otherwise, you may get `Illegal instruction` crashes when the hypervisor is run on old CPU models.

## ZooKeeper {#zookeeper}

You are probably already using ZooKeeper for other purposes. You can use the same ZooKeeper installation if it is not already overloaded.

It is best to use a fresh version of ZooKeeper, 3.4.9 or later. The ZooKeeper version in stable Linux distributions may be outdated.

You should never use manually written scripts to transfer data between different ZooKeeper clusters; it may lead to incorrect data in sequential nodes. Never use the zkcopy tool, for the same reason: https://github.com/ksprojects/zkcopy/issues/15

If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters.

Do not run ZooKeeper on the same servers as ClickHouse, because ZooKeeper is very sensitive to latency while ClickHouse may utilize all available system resources.

With the default settings, ZooKeeper is a time bomb:

The ZooKeeper server does not delete files from old snapshots and logs when using the default configuration (see autopurge); this is the responsibility of the operator.

This bomb must be defused.
@ -222,7 +225,7 @@ JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
  -XX:+CMSParallelRemarkEnabled"
```

Init:

description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service"
@ -27,7 +27,7 @@ toc_title: "Aggregate function combinators"

## -State {#agg-functions-combinator-state}

If you apply this combinator, the aggregate function does not return the resulting value (such as the number of unique values for the [uniq](./reference/uniq.md#agg_function-uniq) function) but an intermediate state of the aggregation (for `uniq`, this is the hash table used to calculate the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.

To work with these states, use:

@ -209,7 +209,7 @@ FROM

Let's get the names of the people whose ages fall within the intervals `[30,60)` and `[60,75)`. Since we use integers to represent age, we get the `[30, 59]` and `[60, 74]` intervals.

To aggregate the names into an array, we use the [groupArray](./reference/grouparray.md#agg_function-grouparray) aggregate function. It takes one argument; in our case, it is the `name` column. The `groupArrayResample` function should use the `age` column to aggregate names by age. To define the required intervals, we pass the `30, 75, 30` arguments to the `groupArrayResample` function.

``` sql
SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
```

@ -493,6 +493,6 @@ FROM

## sumMapFiltered(keys_to_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values}

Basically the same as [sumMap](./reference/summap.md#agg_functions-summap), except that an array of keys is passed as a parameter. This is especially useful when working with high-cardinality keys.

[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) <!--hide-->
File diff suppressed because it is too large

13 docs/zh/sql-reference/aggregate-functions/reference/any.md Normal file
@ -0,0 +1,13 @@

---
toc_priority: 6
---

# any {#agg_function-any}

Selects the first value encountered.
The query can be executed in any order, and even in a different order each time, so the result of this function is indeterminate.
To get a deterministic result, use the `min` or `max` function instead of `any`.

In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.

When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
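
A small sketch of the MySQL-style usage just described; the `visits` table and its columns are hypothetical:

``` sql
-- Pick one (arbitrary) referrer per domain while counting visits;
-- `any` makes the non-key column acceptable to ClickHouse.
SELECT domain, any(referrer), count() AS hits
FROM visits
GROUP BY domain;
```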

@ -0,0 +1,34 @@

---
toc_priority: 103
---

# anyHeavy {#anyheavyx}

Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs more than half the time in each of the query's execution threads, this value is returned. Normally, the result is indeterminate.

``` sql
anyHeavy(column)
```

**Arguments**

- `column` — the column name.

**Example**

Take the [OnTime](../../../getting-started/example-datasets/ontime.md) dataset and select any frequently occurring value in the `AirlineID` column.

Query:

``` sql
SELECT anyHeavy(AirlineID) AS res
FROM ontime;
```

Result:

``` text
┌───res─┐
│ 19690 │
└───────┘
```
@ -0,0 +1,9 @@

---
toc_priority: 104
---

# anyLast {#anylastx}

Selects the last value encountered.
The result is just as indeterminate as that of the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.

@ -0,0 +1,64 @@

---
toc_priority: 106
---

# argMax {#agg-function-argmax}

Calculates the `arg` value for the maximum `val` value. If there are several different `arg` values for the maximum `val`, it returns the first of these values encountered.

The Tuple version of this function returns the tuple with the maximum `val` value. It is convenient for use with `SimpleAggregateFunction`.

**Syntax**

``` sql
argMax(arg, val)
```

or

``` sql
argMax(tuple(arg, val))
```

**Arguments**

- `arg` — argument.
- `val` — value.

**Returned value**

- The `arg` value that corresponds to the maximum `val` value.

Type: matches the `arg` type.

For a tuple in the input:

- A tuple `(arg, val)` with the maximum `val` value and the corresponding `arg`.

Type: [Tuple](../../../sql-reference/data-types/tuple.md).

**Example**

Input table:

``` text
┌─user─────┬─salary─┐
│ director │   5000 │
│ manager  │   3000 │
│ worker   │   1000 │
└──────────┴────────┘
```

Query:

``` sql
SELECT argMax(user, salary), argMax(tuple(user, salary), salary), argMax(tuple(user, salary)) FROM salary;
```

Result:

``` text
┌─argMax(user, salary)─┬─argMax(tuple(user, salary), salary)─┬─argMax(tuple(user, salary))─┐
│ director             │ ('director',5000)                   │ ('director',5000)           │
└──────────────────────┴─────────────────────────────────────┴─────────────────────────────┘
```
@ -0,0 +1,37 @@

---
toc_priority: 105
---

# argMin {#agg-function-argmin}

Syntax: `argMin(arg, val)` or `argMin(tuple(arg, val))`

Calculates the `arg` value for the minimum `val` value. If there are several different `arg` values for the minimum `val`, it returns the first of these (`arg`) values encountered.

The Tuple version of this function returns the tuple with the minimum `val` value. It is convenient for use with `SimpleAggregateFunction`.

**Example:**

Input table:

``` text
┌─user─────┬─salary─┐
│ director │   5000 │
│ manager  │   3000 │
│ worker   │   1000 │
└──────────┴────────┘
```

Query:

``` sql
SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary;
```

Result:

``` text
┌─argMin(user, salary)─┬─argMin(tuple(user, salary))─┐
│ worker               │ ('worker',1000)             │
└──────────────────────┴─────────────────────────────┘
```
64 docs/zh/sql-reference/aggregate-functions/reference/avg.md Normal file
@ -0,0 +1,64 @@

---
toc_priority: 5
---

# avg {#agg_function-avg}

Calculates the arithmetic mean.

**Syntax**

``` sql
avg(x)
```

**Arguments**

- `x` — input values; must be [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md).

**Returned value**

- The arithmetic mean, always as [Float64](../../../sql-reference/data-types/float.md).
- `NaN` if the input parameter `x` is empty.

**Example**

Query:

``` sql
SELECT avg(x) FROM values('x Int8', 0, 1, 2, 3, 4, 5);
```

Result:

``` text
┌─avg(x)─┐
│    2.5 │
└────────┘
```

**Example**

Create a temporary table:

Query:

``` sql
CREATE table test (t UInt8) ENGINE = Memory;
```

Get the arithmetic mean:

Query:

``` sql
SELECT avg(t) FROM test;
```

Result:

``` text
┌─avg(t)─┐
│    nan │
└────────┘
```
@ -0,0 +1,84 @@

---
toc_priority: 107
---

# avgWeighted {#avgweighted}

Calculates the [weighted arithmetic mean](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean).

**Syntax**

``` sql
avgWeighted(x, weight)
```

**Arguments**

- `x` — values.
- `weight` — weights of the values.

`x` and `weight` must both be
[Integer](../../../sql-reference/data-types/int-uint.md),
[floating-point](../../../sql-reference/data-types/float.md), or
[Decimal](../../../sql-reference/data-types/decimal.md),
but they may have different types.

**Returned value**

- `NaN` if all the weights are equal to 0 or the supplied weights parameter is empty.
- The weighted mean otherwise.

Type: always [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
```

Result:

``` text
┌─avgWeighted(x, w)─┐
│                 8 │
└───────────────────┘
```

**Example**

Query:

``` sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (0, 0), (1, 0), (10, 0))
```

Result:

``` text
┌─avgWeighted(x, w)─┐
│               nan │
└───────────────────┘
```

**Example**

Query:

``` sql
CREATE table test (t UInt8) ENGINE = Memory;
SELECT avgWeighted(t, t) FROM test
```

Result:

``` text
┌─avgWeighted(t, t)─┐
│               nan │
└───────────────────┘
```
@ -0,0 +1,13 @@

---
toc_priority: 250
---

# categoricalInformationValue {#categoricalinformationvalue}

Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.

``` sql
categoricalInformationValue(category1, category2, ..., tag)
```

The result indicates how a discrete (categorical) feature `[category1, category2, ...]` contributes to a learning model that predicts the value of `tag`.
15
docs/zh/sql-reference/aggregate-functions/reference/corr.md
Normal file
15
docs/zh/sql-reference/aggregate-functions/reference/corr.md
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
toc_priority: 107
|
||||
---
|
||||
|
||||
# corr {#corrx-y}
|
||||
|
||||
**语法**
|
||||
``` sql
|
||||
`corr(x, y)`
|
||||
```
|
||||
|
||||
计算Pearson相关系数: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`。
|
||||
|
||||
!!! note "注"
|
||||
该函数使用数值不稳定的算法。 如果你需要 [数值稳定性](https://en.wikipedia.org/wiki/Numerical_stability) 在计算中,使用 `corrStable` 函数。 它的工作速度较慢,但提供较低的计算错误。
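
A self-contained sketch on generated data; two perfectly linearly related columns give a coefficient of 1:

``` sql
SELECT corr(x, y)
FROM (SELECT number AS x, 2 * number + 1 AS y FROM numbers(100));
```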

70 docs/zh/sql-reference/aggregate-functions/reference/count.md Normal file
@ -0,0 +1,70 @@

---
toc_priority: 1
---

# count {#agg_function-count}

Counts the number of rows or non-NULL values.

ClickHouse supports the following `count` syntaxes:
- `count(expr)` or `COUNT(DISTINCT expr)`.
- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific.

**Arguments**

The function can take:

- Zero arguments.
- One [expression](../../../sql-reference/syntax.md#syntax-expressions).

**Returned value**

- If the function is called without arguments, it counts the number of rows.
- If an [expression](../../../sql-reference/syntax.md#syntax-expressions) is passed, the function counts how many times this expression returned non-NULL. If the expression returns a [Nullable](../../../sql-reference/data-types/nullable.md) value, the result of `count` is still not `Nullable`. The function returns 0 if the expression returned `NULL` for all rows.

In both cases, the type of the returned value is [UInt64](../../../sql-reference/data-types/int-uint.md).

**Details**

ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.

The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It selects a small column from the table and counts the number of values in it.

**Examples**

Example 1:

``` sql
SELECT count() FROM t
```

``` text
┌─count()─┐
│       5 │
└─────────┘
```

Example 2:

``` sql
SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation'
```

``` text
┌─name──────────────────────────┬─value─────┐
│ count_distinct_implementation │ uniqExact │
└───────────────────────────────┴───────────┘
```

``` sql
SELECT count(DISTINCT num) FROM t
```

``` text
┌─uniqExact(num)─┐
│              3 │
└────────────────┘
```

This example shows that `count(DISTINCT num)` was performed by the `uniqExact` function, according to the `count_distinct_implementation` setting.

@ -0,0 +1,15 @@

---
toc_priority: 36
---

# covarPop {#covarpop}

**Syntax**

``` sql
covarPop(x, y)
```

Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarPopStable` function. It works more slowly but provides a lower computational error.
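
A self-contained sketch contrasting the population estimator with the sample estimator described next:

``` sql
-- covarPop divides by n, covarSamp by (n - 1), so covarPop is slightly smaller.
SELECT covarPop(x, y), covarSamp(x, y)
FROM (SELECT number AS x, 3 * number AS y FROM numbers(10));
```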

@ -0,0 +1,17 @@

---
toc_priority: 37
---

# covarSamp {#covarsamp}

**Syntax**

``` sql
covarSamp(x, y)
```

Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.

Returns Float64. When `n <= 1`, returns +∞.

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarSampStable` function. It works more slowly but provides a lower computational error.

@ -0,0 +1,69 @@

---
toc_priority: 141
---

# deltaSum {#agg_functions-deltasum}

Sums the differences between consecutive rows. If the difference is negative, it is ignored.

**Syntax**

``` sql
deltaSum(value)
```

**Arguments**

- `value` — must be an [Integer](../../data-types/int-uint.md) or [floating-point](../../data-types/float.md) type.

**Returned value**

- The sum of the arithmetic differences, as an `Integer` or `Float` type.

**Examples**

Query:

``` sql
SELECT deltaSum(arrayJoin([1, 2, 3]));
```

Result:

``` text
┌─deltaSum(arrayJoin([1, 2, 3]))─┐
│                              2 │
└────────────────────────────────┘
```

Query:

``` sql
SELECT deltaSum(arrayJoin([1, 2, 3, 0, 3, 4, 2, 3]));
```

Result:

``` text
┌─deltaSum(arrayJoin([1, 2, 3, 0, 3, 4, 2, 3]))─┐
│                                             7 │
└───────────────────────────────────────────────┘
```

Query:

``` sql
SELECT deltaSum(arrayJoin([2.25, 3, 4.5]));
```

Result:

``` text
┌─deltaSum(arrayJoin([2.25, 3, 4.5]))─┐
│                                2.25 │
└─────────────────────────────────────┘
```

**See also**

- [runningDifference](../../functions/other-functions.md#other_functions-runningdifference)

@ -0,0 +1,20 @@

---
toc_priority: 110
---

# groupArray {#agg_function-grouparray}

**Syntax**

``` sql
groupArray(x)
or
groupArray(max_size)(x)
```

Creates an array of the argument values.
Values can be added to the array in any (indeterminate) order.

The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
For example, `groupArray(1)(x)` is equivalent to `[any(x)]`.

In some cases, you can still rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses `ORDER BY`.
@ -0,0 +1,91 @@
|
||||
---
|
||||
toc_priority: 112
|
||||
---
|
||||
|
||||
# groupArrayInsertAt {#grouparrayinsertat}
|
||||
|
||||
在指定位置向数组中插入一个值。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
groupArrayInsertAt(default_x, size)(x, pos);
|
||||
```
|
||||
|
||||
如果在一个查询中将多个值插入到同一位置,则该函数的行为方式如下:
|
||||
|
||||
- 如果在单个线程中执行查询,则使用第一个插入的值。
|
||||
- 如果在多个线程中执行查询,则结果值是未确定的插入值之一。
|
||||
|
||||
**参数**
|
||||
|
||||
- `x` — 要插入的值。生成所[支持的数据类型](../../../sql-reference/data-types/index.md)(数据)的[表达式](../../../sql-reference/syntax.md#syntax-expressions)。
|
||||
- `pos` — 指定元素 `x` 将被插入的位置。 数组中的索引编号从零开始。 [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
|
||||
- `default_x` — 在空位置替换的默认值。可选参数。生成 `x` 数据类型 (数据) 的[表达式](../../../sql-reference/syntax.md#syntax-expressions)。 如果 `default_x` 未定义,则 [默认值](../../../sql-reference/statements/create.md#create-default-values) 被使用。
|
||||
- `size`— 结果数组的长度。可选参数。如果使用该参数,必须指定默认值 `default_x` 。 [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 具有插入值的数组。
|
||||
|
||||
类型: [阵列](../../../sql-reference/data-types/array.md#data-type-array)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT groupArrayInsertAt(toString(number), number * 2) FROM numbers(5);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─groupArrayInsertAt(toString(number), multiply(number, 2))─┐
|
||||
│ ['0','','1','','2','','3','','4'] │
|
||||
└───────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM numbers(5);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─groupArrayInsertAt('-')(toString(number), multiply(number, 2))─┐
|
||||
│ ['0','-','1','-','2','-','3','-','4'] │
|
||||
└────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT groupArrayInsertAt('-', 5)(toString(number), number * 2) FROM numbers(5);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─groupArrayInsertAt('-', 5)(toString(number), multiply(number, 2))─┐
|
||||
│ ['0','-','1','-','2'] │
|
||||
└───────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
在一个位置多线程插入数据。
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size = 1;
|
||||
```
|
||||
|
||||
作为这个查询的结果,你会得到 `[0,9]` 范围的随机整数。 例如:
|
||||
|
||||
``` text
|
||||
┌─groupArrayInsertAt(number, 0)─┐
|
||||
│ [7] │
|
||||
└───────────────────────────────┘
|
||||
```
|
@ -0,0 +1,85 @@
|
||||
---
|
||||
toc_priority: 114
|
||||
---
|
||||
|
||||
# groupArrayMovingAvg {#agg_function-grouparraymovingavg}
|
||||
|
||||
计算输入值的移动平均值。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
groupArrayMovingAvg(numbers_for_summing)
|
||||
groupArrayMovingAvg(window_size)(numbers_for_summing)
|
||||
```
|
||||
|
||||
该函数可以将窗口大小作为参数。 如果未指定,则该函数的窗口大小等于列中的行数。
|
||||
|
||||
**参数**
|
||||
|
||||
- `numbers_for_summing` — [表达式](../../../sql-reference/syntax.md#syntax-expressions) 生成数值数据类型值。
|
||||
- `window_size` — 窗口大小。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 与输入数据大小相同的数组。
|
||||
|
||||
对于输入数据类型是[Integer](../../../sql-reference/data-types/int-uint.md),
|
||||
和[floating-point](../../../sql-reference/data-types/float.md),
|
||||
对应的返回值类型是 `Float64` 。
|
||||
对于输入数据类型是[Decimal](../../../sql-reference/data-types/decimal.md) 返回值类型是 `Decimal128` 。
|
||||
|
||||
该函数对于 `Decimal128` 使用 [四舍五入到零](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). 它截断无意义的小数位来保证结果的数据类型。
|
||||
|
||||
**示例**
|
||||
|
||||
样表 `t`:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE t
|
||||
(
|
||||
`int` UInt8,
|
||||
`float` Float32,
|
||||
`dec` Decimal32(2)
|
||||
)
|
||||
ENGINE = TinyLog
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─int─┬─float─┬──dec─┐
|
||||
│ 1 │ 1.1 │ 1.10 │
|
||||
│ 2 │ 2.2 │ 2.20 │
|
||||
│ 4 │ 4.4 │ 4.40 │
|
||||
│ 7 │ 7.77 │ 7.77 │
|
||||
└─────┴───────┴──────┘
|
||||
```
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
groupArrayMovingAvg(int) AS I,
|
||||
groupArrayMovingAvg(float) AS F,
|
||||
groupArrayMovingAvg(dec) AS D
|
||||
FROM t
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─I────────────────────┬─F─────────────────────────────────────────────────────────────────────────────┬─D─────────────────────┐
|
||||
│ [0.25,0.75,1.75,3.5] │ [0.2750000059604645,0.8250000178813934,1.9250000417232513,3.8499999940395355] │ [0.27,0.82,1.92,3.86] │
|
||||
└──────────────────────┴───────────────────────────────────────────────────────────────────────────────┴───────────────────────┘
|
||||
```
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
groupArrayMovingAvg(2)(int) AS I,
|
||||
groupArrayMovingAvg(2)(float) AS F,
|
||||
groupArrayMovingAvg(2)(dec) AS D
|
||||
FROM t
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─I───────────────┬─F───────────────────────────────────────────────────────────────────────────┬─D─────────────────────┐
|
||||
│ [0.5,1.5,3,5.5] │ [0.550000011920929,1.6500000357627869,3.3000000715255737,6.049999952316284] │ [0.55,1.65,3.30,6.08] │
|
||||
└─────────────────┴─────────────────────────────────────────────────────────────────────────────┴───────────────────────┘
|
||||
```

@ -0,0 +1,81 @@

---
toc_priority: 113
---

# groupArrayMovingSum {#agg_function-grouparraymovingsum}

Calculates the moving sum of input values.

**Syntax**

``` sql
groupArrayMovingSum(numbers_for_summing)
groupArrayMovingSum(window_size)(numbers_for_summing)
```

The function can take the window size as a parameter. If it is not specified, the function's window size equals the number of rows in the column.

**Arguments**

- `numbers_for_summing` — an [expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in a numeric data type value.
- `window_size` — the size of the calculation window.

**Returned value**

- An array of the same size as the input data.
For the [Decimal](../../../sql-reference/data-types/decimal.md) input data type, the array element type is `Decimal128`.
For other numeric types, the corresponding `NearestFieldType` is used.

**Example**

Sample table:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│   1 │   1.1 │ 1.10 │
│   2 │   2.2 │ 2.20 │
│   4 │   4.4 │ 4.40 │
│   7 │  7.77 │ 7.77 │
└─────┴───────┴──────┘
```

Queries:

``` sql
SELECT
    groupArrayMovingSum(int) AS I,
    groupArrayMovingSum(float) AS F,
    groupArrayMovingSum(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

``` sql
SELECT
    groupArrayMovingSum(2)(int) AS I,
    groupArrayMovingSum(2)(float) AS F,
    groupArrayMovingSum(2)(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```
|
@ -0,0 +1,82 @@
---
toc_priority: 114
---

# groupArraySample {#grouparraysample}

Creates an array of sample argument values.
The size of the resulting array is limited to `max_size` elements. Argument values are selected and added to the array randomly.

**Syntax**

``` sql
groupArraySample(max_size[, seed])(x)
```

**Arguments**

- `max_size` — Maximum size of the resulting array. [UInt64](../../data-types/int-uint.md).
- `seed` — Seed for the random number generator. Optional. [UInt64](../../data-types/int-uint.md). Default value: `123456`.
- `x` — Argument (column name or expression).

**Returned values**

- Array of randomly selected values of the argument `x`.

Type: [Array](../../../sql-reference/data-types/array.md).

**Examples**

Sample table `colors`:

``` text
┌─id─┬─color──┐
│  1 │ red    │
│  2 │ blue   │
│  3 │ green  │
│  4 │ white  │
│  5 │ orange │
└────┴────────┘
```

Query with a column name as the argument:

``` sql
SELECT groupArraySample(3)(color) as newcolors FROM colors;
```

Result:

```text
┌─newcolors──────────────────┐
│ ['white','blue','green']   │
└────────────────────────────┘
```

Query with a column name and a different seed:

``` sql
SELECT groupArraySample(3, 987654321)(color) as newcolors FROM colors;
```

Result:

```text
┌─newcolors──────────────────┐
│ ['red','orange','green']   │
└────────────────────────────┘
```

Query with an expression as the argument:

``` sql
SELECT groupArraySample(3)(concat('light-', color)) as newcolors FROM colors;
```

Result:

```text
┌─newcolors───────────────────────────────────┐
│ ['light-blue','light-orange','light-green'] │
└─────────────────────────────────────────────┘
```
@ -0,0 +1,48 @@
---
toc_priority: 125
---

# groupBitAnd {#groupbitand}

Applies bitwise `AND` to a series of numbers.

**Syntax**

``` sql
groupBitAnd(expr)
```

**Arguments**

`expr` – An expression that results in a `UInt*` type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitAnd(num) FROM t
```

`num` is the column with the test data.

Result:

``` text
binary     decimal
00000100 = 4
```
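The table `t` above is never created in this document. Below is a minimal sketch that reproduces the test data, assuming a `TinyLog` table with a single `UInt8` column named `num` (both names are taken from the query, not from a defined schema):

``` sql
-- Hypothetical setup for the groupBitAnd example; table and column names assumed.
CREATE TABLE t (num UInt8) ENGINE = TinyLog;
INSERT INTO t VALUES (44), (28), (13), (85);

SELECT groupBitAnd(num) FROM t; -- 44 & 28 & 13 & 85 = 4 (binary 00000100)
```

The same setup, with these values, should also serve the `groupBitOr` and `groupBitXor` examples later in this document.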
@ -0,0 +1,46 @@
---
toc_priority: 128
---

# groupBitmap {#groupbitmap}

Performs bitmap or aggregate calculations from an unsigned integer column and returns cardinality of type `UInt64`; with the `State` suffix added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

**Syntax**

``` sql
groupBitmap(expr)
```

**Arguments**

`expr` – An expression that results in a `UInt*` type.

**Returned value**

Value of the `UInt64` type.

**Example**

Test data:

``` text
UserID
1
1
2
3
```

Query:

``` sql
SELECT groupBitmap(UserID) as num FROM t
```

Result:

``` text
num
3
```
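As with the bitwise examples, the table behind this query is not defined here. A minimal sketch, assuming a table `t` with a `UInt32` column `UserID`:

``` sql
-- Hypothetical setup for the groupBitmap example; table and column names assumed.
CREATE TABLE t (UserID UInt32) ENGINE = TinyLog;
INSERT INTO t VALUES (1), (1), (2), (3);

SELECT groupBitmap(UserID) AS num FROM t; -- 3 distinct values
```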
@ -0,0 +1,48 @@
---
toc_priority: 129
---

# groupBitmapAnd {#groupbitmapand}

Calculates the `AND` of a bitmap column and returns cardinality of type `UInt64`; with the `State` suffix added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

**Syntax**

``` sql
groupBitmapAnd(expr)
```

**Arguments**

`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.

**Returned value**

Value of the `UInt64` type.

**Example**

``` sql
DROP TABLE IF EXISTS bitmap_column_expr_test2;
CREATE TABLE bitmap_column_expr_test2
(
    tag_id String,
    z AggregateFunction(groupBitmap, UInt32)
)
ENGINE = MergeTree
ORDER BY tag_id;

INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));

SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─groupBitmapAnd(z)─┐
│                 3 │
└───────────────────┘

SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
│ [6,8,10]                                         │
└──────────────────────────────────────────────────┘
```
@ -0,0 +1,48 @@
---
toc_priority: 130
---

# groupBitmapOr {#groupbitmapor}

Calculates the `OR` of a bitmap column and returns cardinality of type `UInt64`; with the `State` suffix added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

**Syntax**

``` sql
groupBitmapOr(expr)
```

**Arguments**

`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.

**Returned value**

Value of the `UInt64` type.

**Example**

``` sql
DROP TABLE IF EXISTS bitmap_column_expr_test2;
CREATE TABLE bitmap_column_expr_test2
(
    tag_id String,
    z AggregateFunction(groupBitmap, UInt32)
)
ENGINE = MergeTree
ORDER BY tag_id;

INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));

SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─groupBitmapOr(z)─┐
│               15 │
└──────────────────┘

SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
└─────────────────────────────────────────────────┘
```
@ -0,0 +1,48 @@
---
toc_priority: 131
---

# groupBitmapXor {#groupbitmapxor}

Calculates the `XOR` of a bitmap column and returns cardinality of type `UInt64`; with the `State` suffix added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

**Syntax**

``` sql
groupBitmapXor(expr)
```

**Arguments**

`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.

**Returned value**

Value of the `UInt64` type.

**Example**

``` sql
DROP TABLE IF EXISTS bitmap_column_expr_test2;
CREATE TABLE bitmap_column_expr_test2
(
    tag_id String,
    z AggregateFunction(groupBitmap, UInt32)
)
ENGINE = MergeTree
ORDER BY tag_id;

INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));

SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─groupBitmapXor(z)─┐
│                10 │
└───────────────────┘

SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
│ [1,3,5,6,8,10,11,13,14,15]                       │
└──────────────────────────────────────────────────┘
```
@ -0,0 +1,48 @@
---
toc_priority: 126
---

# groupBitOr {#groupbitor}

Applies bitwise `OR` to a series of numbers.

**Syntax**

``` sql
groupBitOr(expr)
```

**Arguments**

`expr` – An expression that results in a `UInt*` type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitOr(num) FROM t
```

`num` is the column with the test data.

Result:

``` text
binary     decimal
01111101 = 125
```
@ -0,0 +1,48 @@
---
toc_priority: 127
---

# groupBitXor {#groupbitxor}

Applies bitwise `XOR` to a series of numbers.

**Syntax**

``` sql
groupBitXor(expr)
```

**Arguments**

`expr` – An expression that results in a `UInt*` type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitXor(num) FROM t
```

`num` is the column with the test data.

Result:

``` text
binary     decimal
01101000 = 104
```
@ -0,0 +1,18 @@
---
toc_priority: 111
---

# groupUniqArray {#groupuniqarray}

**Syntax**

``` sql
groupUniqArray(x)
or
groupUniqArray(max_size)(x)
```

Creates an array from distinct argument values. Memory consumption is the same as for the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md) function.

The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`.
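This page has no example of its own; below is a minimal sketch, assuming the `colors` table defined above for the groupArraySample example:

``` sql
-- Assumes the sample table `colors` from the groupArraySample example.
SELECT groupUniqArray(color) AS uniq_colors FROM colors;
-- Returns all five distinct colors; the element order is not guaranteed.
```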
docs/zh/sql-reference/aggregate-functions/reference/index.md
@ -0,0 +1,72 @@
---
toc_folder_title: Reference
toc_priority: 36
toc_hidden: true
---

# List of Aggregate Functions {#aggregate-functions-reference}

Standard aggregate functions:

- [count](../../../sql-reference/aggregate-functions/reference/count.md)
- [min](../../../sql-reference/aggregate-functions/reference/min.md)
- [max](../../../sql-reference/aggregate-functions/reference/max.md)
- [sum](../../../sql-reference/aggregate-functions/reference/sum.md)
- [avg](../../../sql-reference/aggregate-functions/reference/avg.md)
- [any](../../../sql-reference/aggregate-functions/reference/any.md)
- [stddevPop](../../../sql-reference/aggregate-functions/reference/stddevpop.md)
- [stddevSamp](../../../sql-reference/aggregate-functions/reference/stddevsamp.md)
- [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md)
- [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md)
- [covarPop](../../../sql-reference/aggregate-functions/reference/covarpop.md)
- [covarSamp](../../../sql-reference/aggregate-functions/reference/covarsamp.md)

ClickHouse-specific aggregate functions:

- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
- [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md)
- [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md)
- [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md)
- [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md)
- [groupBitmapAnd](../../../sql-reference/aggregate-functions/reference/groupbitmapand.md)
- [groupBitmapOr](../../../sql-reference/aggregate-functions/reference/groupbitmapor.md)
- [groupBitmapXor](../../../sql-reference/aggregate-functions/reference/groupbitmapxor.md)
- [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md)
- [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md)
- [minMap](../../../sql-reference/aggregate-functions/reference/minmap.md)
- [maxMap](../../../sql-reference/aggregate-functions/reference/maxmap.md)
- [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md)
- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md)
- [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md)
- [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md)
- [quantileExactLow](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md)
- [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md)
- [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md)
- [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md)
- [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md)
- [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
- [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md)
- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../../../sql-reference/aggregate-functions/reference/categoricalinformationvalue.md)
@ -0,0 +1,37 @@
---
toc_priority: 150
---

## initializeAggregation {#initializeaggregation}

Initializes aggregation for your input rows. Intended for functions with the suffix `State`.
Use it to test or to process columns of the `AggregateFunction` and `AggregatingMergeTree` types.

**Syntax**

``` sql
initializeAggregation (aggregate_function, column_1, column_2)
```

**Arguments**

- `aggregate_function` — Name of the aggregation function whose state is to be created. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — The column to translate into the function as an argument. [String](../../../sql-reference/data-types/string.md#string).

**Returned values**

Returns the result of the aggregation for your input rows. The return type is the same as the return type of the function that `initializeAggregation` takes as its first argument.
For example, for functions with the suffix `State`, the return type is `AggregateFunction`.

**Example**

Query:

```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```

Result:

```text
┌─uniqMerge(state)─┐
│                3 │
└──────────────────┘
```
@ -0,0 +1,26 @@
---
toc_priority: 153
---

# kurtPop {#kurtpop}

Computes the [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.

**Syntax**

``` sql
kurtPop(expr)
```

**Arguments**

`expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) returning a number.

**Returned value**

The kurtosis of the given distribution. Type — [Float64](../../../sql-reference/data-types/float.md)

**Example**

``` sql
SELECT kurtPop(value) FROM series_with_value_column;
```
@ -0,0 +1,28 @@
---
toc_priority: 154
---

# kurtSamp {#kurtsamp}

Computes the [sample kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
It represents an unbiased estimate of the kurtosis of a random variable if the passed values form its sample.

**Syntax**

``` sql
kurtSamp(expr)
```

**Arguments**

`expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) returning a number.

**Returned value**

The kurtosis of the given sequence. Type — [Float64](../../../sql-reference/data-types/float.md). If `n <= 1` (`n` is the size of the sample), the function returns `nan`.

**Example**

``` sql
SELECT kurtSamp(value) FROM series_with_value_column;
```
@ -0,0 +1,72 @@
---
toc_priority: 310
toc_title: mannWhitneyUTest
---

# mannWhitneyUTest {#mannwhitneyutest}

Applies the Mann-Whitney rank test to samples from two populations.

**Syntax**

``` sql
mannWhitneyUTest[(alternative[, continuity_correction])](sample_data, sample_index)
```

Values of both samples are in the `sample_data` column. If `sample_index` equals 0, the value in that row belongs to the sample from the first population; otherwise it belongs to the sample from the second population.
The null hypothesis is that the two populations are stochastically equal. One-sided hypotheses can also be tested. This test does not assume that the data are normally distributed.

**Arguments**

- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).

**Parameters**

- `alternative` — Alternative hypothesis. (Optional, default: `'two-sided'`.) [String](../../../sql-reference/data-types/string.md).
    - `'two-sided'`;
    - `'greater'`;
    - `'less'`.
- `continuity_correction` — If not 0, a continuity correction is applied in the normal approximation of the p-value. (Optional, default: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

[Tuple](../../../sql-reference/data-types/tuple.md) with two elements:

- Calculated U-statistic. [Float64](../../../sql-reference/data-types/float.md).
- Calculated p-value. [Float64](../../../sql-reference/data-types/float.md).

**Example**

Input table:

``` text
┌─sample_data─┬─sample_index─┐
│          10 │            0 │
│          11 │            0 │
│          12 │            0 │
│           1 │            1 │
│           2 │            1 │
│           3 │            1 │
└─────────────┴──────────────┘
```

Query:

``` sql
SELECT mannWhitneyUTest('greater')(sample_data, sample_index) FROM mww_ttest;
```

Result:

``` text
┌─mannWhitneyUTest('greater')(sample_data, sample_index)─┐
│ (9,0.04042779918503192)                                │
└────────────────────────────────────────────────────────┘
```

**See Also**

- [Mann–Whitney U test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test)
- [Stochastic ordering](https://en.wikipedia.org/wiki/Stochastic_ordering)
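The `mww_ttest` table is not created in this document; below is a minimal sketch reproducing the input table shown above (the column types are assumptions):

``` sql
-- Hypothetical setup matching the input table of the mannWhitneyUTest example.
CREATE TABLE mww_ttest (sample_data Int32, sample_index UInt8) ENGINE = TinyLog;
INSERT INTO mww_ttest VALUES (10, 0), (11, 0), (12, 0), (1, 1), (2, 1), (3, 1);
```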
@ -0,0 +1,7 @@
---
toc_priority: 3
---

# max {#agg_function-max}

Calculates the maximum.
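A minimal usage sketch with the built-in `numbers` table function:

``` sql
SELECT max(number) FROM numbers(10); -- returns 9
```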
@ -0,0 +1,33 @@
---
toc_priority: 143
---

# maxMap {#agg_functions-maxmap}

**Syntax**

```sql
maxMap(key, value)
or
maxMap(Tuple(key, value))
```

Calculates the maximum from the `value` array according to the keys specified in the `key` array.

Passing a tuple of `key` and `value` arrays is synonymous with passing two arrays of keys and values.
The number of elements in `key` and `value` must be the same for each row that is totaled.
Returns a tuple of two arrays: the keys in sorted order, and the values (maxima) calculated for the corresponding keys.

Example:

``` sql
SELECT maxMap(a, b)
FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1]))
```

``` text
┌─maxMap(a, b)──────┐
│ ([1,2,3],[2,2,1]) │
└───────────────────┘
```
@ -0,0 +1,41 @@
# median {#median}

The `median*` functions are aliases for the corresponding `quantile*` functions. They calculate the median of a numeric data sample.

Functions:

- `median` — Alias for [quantile](#quantile).
- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
- `medianExact` — Alias for [quantileExact](#quantileexact).
- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).

**Example**

Input table:

``` text
┌─val─┐
│   1 │
│   1 │
│   2 │
│   3 │
└─────┘
```

Query:

``` sql
SELECT medianDeterministic(val, 1) FROM t
```

Result:

``` text
┌─medianDeterministic(val, 1)─┐
│                         1.5 │
└─────────────────────────────┘
```
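The table `t` used in this query (and in the `quantile` and `quantileDeterministic` examples below) is not defined in this document; a minimal sketch, assuming a single-column `TinyLog` table:

``` sql
-- Hypothetical setup matching the input table of the median/quantile examples.
CREATE TABLE t (val UInt8) ENGINE = TinyLog;
INSERT INTO t VALUES (1), (1), (2), (3);
```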
@ -0,0 +1,7 @@
---
toc_priority: 2
---

## min {#agg_function-min}

Calculates the minimum.
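A minimal usage sketch with the built-in `numbers` table function:

``` sql
SELECT min(number) FROM numbers(10); -- returns 0
```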
@ -0,0 +1,32 @@
---
toc_priority: 142
---

# minMap {#agg_functions-minmap}

**Syntax**

```sql
minMap(key, value)
or
minMap(Tuple(key, value))
```

Calculates the minimum from the `value` array according to the keys specified in the `key` array.

Passing a tuple of `key` and `value` arrays is synonymous with passing two arrays of keys and values.
The number of elements in `key` and `value` must be the same for each row that is totaled.
Returns a tuple of two arrays: the keys in sorted order, and the values (minima) calculated for the corresponding keys.

**Example**

``` sql
SELECT minMap(a, b)
FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1]))
```

``` text
┌─minMap(a, b)──────┐
│ ([1,2,3],[2,1,1]) │
└───────────────────┘
```
@ -0,0 +1,65 @@
---
toc_priority: 200
---

# quantile {#quantile}

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling.
The result is non-deterministic. To get an exact quantile, use the [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantile(level)(expr)
```

Alias: `median`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

**Returned value**

- Approximate quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Input table:

``` text
┌─val─┐
│   1 │
│   1 │
│   2 │
│   3 │
└─────┘
```

Query:

``` sql
SELECT quantile(val) FROM t
```

Result:

``` text
┌─quantile(val)─┐
│           1.5 │
└───────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,66 @@
---
toc_priority: 206
---

# quantileDeterministic {#quantiledeterministic}

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a deterministic sampling algorithm based on the `determinator` argument, so the result is deterministic. To get an exact quantile, use the [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact) function.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileDeterministic(level)(expr, determinator)
```

Alias: `medianDeterministic`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm, to make the result of sampling deterministic. You can use any positive number, for example a user id or an event id; but if the same `determinator` value occurs too often, the function may work incorrectly.

**Returned value**

- Approximate quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Input table:

``` text
┌─val─┐
│   1 │
│   1 │
│   2 │
│   3 │
└─────┘
```

Query:

``` sql
SELECT quantileDeterministic(val, 1) FROM t
```

Result:

``` text
┌─quantileDeterministic(val, 1)─┐
│                           1.5 │
└───────────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,170 @@
---
toc_priority: 202
---

# quantileExact {#quantileexact}

Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values passed. However, for a small number of values, the function is very effective.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileExact(level)(expr)
```

Alias: `medianExact`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

**Returned value**

- Quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:

``` sql
SELECT quantileExact(number) FROM numbers(10)
```

Result:

``` text
┌─quantileExact(number)─┐
│                     5 │
└───────────────────────┘
```

# quantileExactLow {#quantileexactlow}

Similar to `quantileExact`, it exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

To get the exact value, all the passed values are combined into an array, which is then fully sorted. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.

The return value depends on the quantile level and the number of elements selected: if the level is 0.5, the function returns the lower median for an even number of elements and the middle median for an odd number of elements. The median is calculated similarly to the [median_low](https://docs.python.org/3/library/statistics.html#statistics.median_low) implementation used in python.

For all other levels, the element at the index corresponding to the value `level * size_of_array` is returned.

For example:

``` sql
SELECT quantileExactLow(0.1)(number) FROM numbers(10)

┌─quantileExactLow(0.1)(number)─┐
│                             1 │
└───────────────────────────────┘
```

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileExactLow(level)(expr)
```

Alias: `medianExactLow`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

**Returned value**

- Quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:

``` sql
SELECT quantileExactLow(number) FROM numbers(10)
```

Result:

``` text
┌─quantileExactLow(number)─┐
│                        4 │
└──────────────────────────┘
```

# quantileExactHigh {#quantileexacthigh}

Similar to `quantileExact`, it exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

To get the exact value, all the passed values are combined into an array, which is then fully sorted. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.

The return value depends on the quantile level and the number of elements selected: if the level is 0.5, the function returns the higher median for an even number of elements and the middle median for an odd number of elements. The median is calculated similarly to the [median_high](https://docs.python.org/3/library/statistics.html#statistics.median_high) implementation used in python.

For all other levels, the element at the index corresponding to the value `level * size_of_array` is returned.

This implementation behaves exactly like the current `quantileExact` implementation.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileExactHigh(level)(expr)
```

Alias: `medianExactHigh`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

**Returned value**

- Quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:

``` sql
SELECT quantileExactHigh(number) FROM numbers(10)
```

Result:

``` text
┌─quantileExactHigh(number)─┐
│                         5 │
└───────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,66 @@
---
toc_priority: 203
---

# quantileExactWeighted {#quantileexactweighted}

Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element.

To get the exact value, all the passed values are combined into an array, which is then partially sorted. Each input value is counted according to its `weight`, as if it were present `weight` times. A hash table is used in the algorithm; because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact). You can use this function instead of `quantileExact` by specifying a `weight` of 1.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileExactWeighted(level)(expr, weight)
```

Alias: `medianExactWeighted`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
- `weight` — Column with weights of sequence members. Weight is the number of occurrences of a value.

**Returned value**

- Quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Input table:

``` text
┌─n─┬─val─┐
│ 0 │   3 │
│ 1 │   2 │
│ 2 │   1 │
│ 5 │   4 │
└───┴─────┘
```

Query:

``` sql
SELECT quantileExactWeighted(n, val) FROM t
```

Result:

``` text
┌─quantileExactWeighted(n, val)─┐
│                             1 │
└───────────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,12 @@
---
toc_priority: 201
---

# quantiles {#quantiles}

**Syntax**

``` sql
quantiles(level1, level2, …)(x)
```

All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass and return an array of the resulting values.
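No example is given on this page; a minimal sketch with the built-in `numbers` table function:

``` sql
SELECT quantiles(0.25, 0.5, 0.75)(number) FROM numbers(10);
-- One array with all three requested quantiles, approximately [2.25,4.5,6.75].
```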
@ -0,0 +1,57 @@
---
toc_priority: 207
---

# quantileTDigest {#quantiletdigest}

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm.

The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values. The result depends on the order of running the query, and is non-deterministic.

The performance of the function is lower than that of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileTDigest(level)(expr)
```

Alias: `medianTDigest`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

**Returned value**

- Approximate quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:

``` sql
SELECT quantileTDigest(number) FROM numbers(10)
```

Result:

``` text
┌─quantileTDigest(number)─┐
│                     4.5 │
└─────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,58 @@
---
toc_priority: 208
---

# quantileTDigestWeighted {#quantiletdigestweighted}

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values.

The performance of the function is lower than that of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.

The result depends on the order of running the query, and is non-deterministic.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileTDigestWeighted(level)(expr, weight)
```

Alias: `medianTDigestWeighted`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
- `weight` — Column with weights of sequence members. Weight is the number of occurrences of a value.

**Returned value**

- Approximate quantile of the specified level.

Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:

``` sql
SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
```

Result:

``` text
┌─quantileTDigestWeighted(number, 1)─┐
│                                4.5 │
└────────────────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,86 @@
---
toc_priority: 204
---

# quantileTiming {#quantiletiming}

With the determined precision, computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences that describe distributions such as web page loading times or backend response times.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileTiming(level)(expr)
```

Alias: `medianTiming`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) over column values returning a [Float\*](../../../sql-reference/data-types/float.md)-type number.

    - If negative values are passed to the function, the behavior is undefined.
    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.

**Accuracy**

The calculation is accurate if:

- The total number of values does not exceed 5670.
- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.

Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.

!!! note "Note"
    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).

**Returned value**

- Quantile of the specified level.

Type: `Float32`.

!!! note "Note"
    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../../sql-reference/data-types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to distinguish these cases from cases that result in zero. See the [ORDER BY clause](../../../sql-reference/statements/select/order-by.md#select-order-by) for notes on sorting `NaN` values.

**Example**

Input table:

``` text
┌─response_time─┐
│            72 │
│           112 │
│           126 │
│           145 │
│           104 │
│           242 │
│           313 │
│           168 │
│           108 │
└───────────────┘
```

Query:

``` sql
SELECT quantileTiming(response_time) FROM t
```

Result:

``` text
┌─quantileTiming(response_time)─┐
│                           126 │
└───────────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,118 @@
---
toc_priority: 205
---

# quantileTimingWeighted {#quantiletimingweighted}

With the determined precision, computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member.

The result is deterministic (it does not depend on the query processing order). The function is optimized for working with sequences that describe distributions such as web page loading times or backend response times.

When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

**Syntax**

``` sql
quantileTimingWeighted(level)(expr, weight)
```

Alias: `medianTimingWeighted`.

**Arguments**

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
- `expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) over column values returning a [Float\*](../../../sql-reference/data-types/float.md)-type number.

    - If negative values are passed to the function, the behavior is undefined.
    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.

- `weight` — Column with weights of sequence members. Weight is the number of occurrences of a value.

**Accuracy**

The calculation is accurate if:

- The total number of values does not exceed 5670.
- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.

Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.

!!! note "Note"
    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).

**Returned value**

- Quantile of the specified level.

Type: `Float32`.

!!! note "Note"
    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../../sql-reference/data-types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to distinguish these cases from cases that result in zero. See the [ORDER BY clause](../../../sql-reference/statements/select/order-by.md#select-order-by) for notes on sorting `NaN` values.

**Example**

Input table:

``` text
┌─response_time─┬─weight─┐
│            68 │      1 │
│           104 │      2 │
│           112 │      3 │
│           126 │      2 │
│           138 │      1 │
│           162 │      1 │
└───────────────┴────────┘
```

Query:

``` sql
SELECT quantileTimingWeighted(response_time, weight) FROM t
```

Result:

``` text
┌─quantileTimingWeighted(response_time, weight)─┐
│                                           112 │
└───────────────────────────────────────────────┘
```

# quantilesTimingWeighted {#quantilestimingweighted}

Same as `quantileTimingWeighted`, but accepts multiple quantile levels and returns an array filled with the values of those quantiles.

**Example**

Input table:

``` text
┌─response_time─┬─weight─┐
│            68 │      1 │
│           104 │      2 │
│           112 │      3 │
│           126 │      2 │
│           138 │      1 │
│           162 │      1 │
└───────────────┴────────┘
```

Query:

``` sql
SELECT quantilesTimingWeighted(0.5, 0.99)(response_time, weight) FROM t
```

Result:

``` text
┌─quantilesTimingWeighted(0.5, 0.99)(response_time, weight)─┐
│ [112,162]                                                 │
└───────────────────────────────────────────────────────────┘
```

**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -0,0 +1,53 @@
## rankCorr {#agg_function-rankcorr}

Computes a rank correlation coefficient.

**Syntax**

``` sql
rankCorr(x, y)
```

**Arguments**

- `x` — Arbitrary value. [Float32](../../../sql-reference/data-types/float.md#float32-float64) or [Float64](../../../sql-reference/data-types/float.md#float32-float64).
- `y` — Arbitrary value. [Float32](../../../sql-reference/data-types/float.md#float32-float64) or [Float64](../../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- Returns a rank correlation coefficient of the ranks of x and y. The value of the correlation coefficient ranges from -1 to +1. If fewer than two arguments are passed, the function returns an exception. A value close to +1 denotes a high linear relationship: as one random variable increases, the second random variable also increases. A value close to -1 denotes a high linear relationship: as one random variable increases, the second random variable decreases. A value close or equal to 0 denotes no relationship between the two random variables.

Type: [Float64](../../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT rankCorr(number, number) FROM numbers(100);
```

Result:

``` text
┌─rankCorr(number, number)─┐
│                        1 │
└──────────────────────────┘
```

Query:

``` sql
SELECT roundBankers(rankCorr(exp(number), sin(number)), 3) FROM numbers(100);
```

Result:

``` text
┌─roundBankers(rankCorr(exp(number), sin(number)), 3)─┐
│                                              -0.037 │
└─────────────────────────────────────────────────────┘
```

**See Also**

- [Spearman's rank correlation coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)
@ -0,0 +1,44 @@
---
toc_priority: 220
---

# simpleLinearRegression {#simplelinearregression}

Performs simple (one-dimensional) linear regression.

**Syntax**

``` sql
simpleLinearRegression(x, y)
```

**Arguments**

- `x` — Column with x-axis values.
- `y` — Column with y-axis values.

**Returned values**

Constants `(a, b)` of the resulting line `y = a*x + b`.

**Examples**

``` sql
SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
```

``` text
┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
│ (1,0)                                                             │
└───────────────────────────────────────────────────────────────────┘
```

``` sql
SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
```

``` text
┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
│ (1,3)                                                             │
└───────────────────────────────────────────────────────────────────┘
```
@ -0,0 +1,27 @@
---
toc_priority: 150
---

# skewPop {#skewpop}

Computes the [skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.

**Syntax**

``` sql
skewPop(expr)
```

**Arguments**

`expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) returning a number.

**Returned value**

The skewness of the given distribution. Type — [Float64](../../../sql-reference/data-types/float.md)

**Example**

``` sql
SELECT skewPop(value) FROM series_with_value_column;
```
@ -0,0 +1,29 @@
---
toc_priority: 151
---

# skewSamp {#skewsamp}

Computes the [sample skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.

It represents an unbiased estimate of the skewness of a random variable if the passed values form its sample.

**Syntax**

``` sql
skewSamp(expr)
```

**Arguments**

`expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) returning a number.

**Returned value**

The skewness of the given distribution. Type — [Float64](../../../sql-reference/data-types/float.md). If `n <= 1` (`n` is the size of the sample), the function returns `nan`.

**Example**

``` sql
SELECT skewSamp(value) FROM series_with_value_column;
```
@ -0,0 +1,10 @@
---
toc_priority: 30
---

# stddevPop {#stddevpop}

The result is equal to the square root of [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md).

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevPopStable` function. It works more slowly but provides a lower computational error.
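This page has no example; a minimal sketch with the built-in `numbers` table function:

``` sql
SELECT stddevPop(number) FROM numbers(10); -- ≈ 2.87, the square root of the population variance of 0..9
```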
@ -0,0 +1,10 @@
---
toc_priority: 31
---

# stddevSamp {#stddevsamp}

The result is equal to the square root of [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md).

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevSampStable` function. It works more slowly but provides a lower computational error.
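As for `stddevPop`, a minimal sketch with the built-in `numbers` table function:

``` sql
SELECT stddevSamp(number) FROM numbers(10); -- ≈ 3.03, the square root of the sample variance of 0..9
```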
@ -0,0 +1,77 @@
---
toc_priority: 221
---

# stochasticLinearRegression {#agg_functions-stochasticlinearregression}

This function implements stochastic linear regression. It supports custom parameters for the learning rate, the L2 regularization coefficient, and the mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).

### Parameters {#agg_functions-stochasticlinearregression-parameters}

There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four — default values will be used; however, a good model requires some parameter tuning.

**Syntax**

``` sql
stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
```

1. `learning rate` — The coefficient for the step length when the gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
2. `l2 regularization coefficient` — May help to prevent overfitting. Default is `0.1`.
3. `mini-batch size` — Sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, small batches (about 10 elements) make gradient steps more stable. Default is `15`.
4. `method for updating weights` — One of: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require a bit more computation and memory, but they happen to be useful in terms of convergence speed and stability of stochastic gradient methods.

### Usage {#agg_functions-stochasticlinearregression-usage}

`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later use, we use the `-State` combinator, which basically saves the state (the model weights, etc.).
To predict, we use the function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as the features to predict on.

<a name="stochasticlinearregression-usage-fitting"></a>

**1.** Fitting

Such a query may be used:

``` sql
CREATE TABLE IF NOT EXISTS train_data
(
    param1 Float64,
    param2 Float64,
    target Float64
) ENGINE = Memory;

CREATE TABLE your_model ENGINE = Memory AS SELECT
stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```

Here we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `linearRegressionState`. They all must be numeric values.
Note that the column with the target value (which we would like to learn to predict) is inserted as the first argument.

**2.** Predicting

After saving a state into the table, we may use it multiple times for prediction, or even merge it with other states to create new, even better models.

``` sql
WITH (SELECT state FROM your_model) AS model SELECT
evalMLMethod(model, param1, param2) FROM test_data
```

The query will return a column of predicted values. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object, and the next are columns of features.

`test_data` is a table like `train_data`, but it may not contain the target value.

### Notes {#agg_functions-stochasticlinearregression-notes}

1. To merge two models, the user may create such a query:
    `sql SELECT state1 + state2 FROM your_models`
    where the `your_models` table contains both models. This query will return a new `AggregateFunctionState` object.

2. The user may fetch weights of the created model for their own purposes without saving the model, if no `-State` combinator is used.
    `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
    Such a query will fit the model and return its weights — first the weights corresponding to the parameters of the model, with the bias last. So, in the example above, the query will return a column with 3 values.

**See Also**

- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression)
- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
@ -0,0 +1,56 @@
---
toc_priority: 222
---

# stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}

This function implements stochastic logistic regression. It can be used for the binary classification problem, supports the same custom parameters as stochasticLinearRegression, and works in the same way.

### Parameters {#agg_functions-stochasticlogisticregression-parameters}

The parameters are exactly the same as in stochasticLinearRegression:
`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
For more information see [parameters](#agg_functions-stochasticlinearregression-parameters).

**Syntax**

``` sql
stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
```

**1.** Fitting

<!-- -->

See the `Fitting` section of the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.

Predicted labels have to be in \[-1, 1\].

**2.** Predicting

<!-- -->

Using the saved state, we can predict the probability of an object having the label `1`.

``` sql
WITH (SELECT state FROM your_model) AS model SELECT
evalMLMethod(model, param1, param2) FROM test_data
```

The query will return a column of probabilities. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object, and the next are columns of features.

We can also set a bound of probability, which would assign elements to different labels.

``` sql
SELECT ans < 1.1 AND ans > 0.5 FROM
(WITH (SELECT state FROM your_model) AS model SELECT
evalMLMethod(model, param1, param2) AS ans FROM test_data)
```

Then the result will be labels.

`test_data` is a table like `train_data`, but it may not contain the target value.

**See Also**

- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression)
- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
Some files were not shown because too many files have changed in this diff.