Merge branch 'master' into no_background_pool_no_more
This commit is contained in commit 40fc512e79.
@ -409,7 +409,7 @@

## ClickHouse release 20.6

### ClickHouse release v20.6.3.28-stable

#### New Feature

@ -2362,7 +2362,7 @@ No changes compared to v20.4.3.16-stable.

* `Live View` table engine refactoring. [#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
* Add additional checks for external dictionaries created from DDL-queries. [#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
* Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Now the first argument of the `joinGet` function can be a table identifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
* Allow using `MaterializedView` with subqueries above `Kafka` tables. [#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
* Now background moves between disks run in a separate thread pool. [#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
* `SYSTEM RELOAD DICTIONARY` now executes synchronously. [#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
@ -59,25 +59,6 @@ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a pos
# For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html
set_property(GLOBAL PROPERTY USE_FOLDERS ON)

# cmake 3.9+ needed.
# Usually impractical.
# See also ${ENABLE_THINLTO}
option(ENABLE_IPO "Full link time optimization")

if(ENABLE_IPO)
    cmake_policy(SET CMP0069 NEW)
    include(CheckIPOSupported)
    check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_NOT_SUPPORTED)
    if(IPO_SUPPORTED)
        message(STATUS "IPO/LTO is supported, enabling")
        set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
    else()
        message(${RECONFIGURE_MESSAGE_LEVEL} "IPO/LTO is not supported: <${IPO_NOT_SUPPORTED}>")
    endif()
else()
    message(STATUS "IPO/LTO not enabled.")
endif()

# Check that submodules are present only if source was downloaded with git
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
    message(FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
@ -17,4 +17,6 @@ ClickHouse is an open-source column-oriented database management system that all
|
||||
|
||||
## Upcoming Events
|
||||
|
||||
* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-october-virtual-meetup-office-hours-tickets-123129500651) on October 22, 2020.
|
||||
* [The Second ClickHouse Meetup East (online)](https://www.eventbrite.com/e/the-second-clickhouse-meetup-east-tickets-126787955187) on October 31, 2020.
|
||||
* [ClickHouse for Enterprise Meetup (online in Russian)](https://arenadata-events.timepad.ru/event/1465249/) on November 10, 2020.
|
||||
|
||||
|
@ -51,7 +51,7 @@ struct StringRef
};

/// Here constexpr doesn't imply inline, see https://www.viva64.com/en/w/v1043/
/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetic
/// and the UBSan thinks that something like nullptr + 8 is UB.
constexpr const inline char empty_string_ref_addr{};
constexpr const inline StringRef EMPTY_STRING_REF{&empty_string_ref_addr, 0};
@ -11,11 +11,11 @@ CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=${VERSION_PATCH})
CFLAGS (GLOBAL -DVERSION_FULL=\"\\\"${VERSION_FULL}\\\"\")
CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH})

# TODO: not supported yet, not sure if ya.make supports arithmetic.
CFLAGS (GLOBAL -DVERSION_INTEGER=0)

CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\")
@ -192,7 +192,7 @@ set(SRCS
${HDFS3_SOURCE_DIR}/common/FileWrapper.h
)

# old kernels (< 3.17) don't have SYS_getrandom. Always use POSIX implementation to have better compatibility
set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1")

# target
2
contrib/mariadb-connector-c
vendored
@ -1 +1 @@
-Subproject commit f5638e954a79f50bac7c7a5deaa5a241e0ce8b5f
+Subproject commit 1485b0de3eaa1508dfe49a5ba1e4aa2a71fd8335
@ -31,10 +31,6 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
|
||||
&& chmod +x dpkg-deb \
|
||||
&& cp dpkg-deb /usr/bin
|
||||
|
||||
RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \
|
||||
&& dpkg -i /tmp/arrow-keyring.deb
|
||||
|
||||
# Libraries from OS are only needed to test the "unbundled" build (this is not used in production).
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
|
@ -1,6 +1,10 @@
|
||||
# docker build -t yandex/clickhouse-unbundled-builder .
|
||||
FROM yandex/clickhouse-deb-builder
|
||||
|
||||
RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \
|
||||
&& dpkg -i /tmp/arrow-keyring.deb
|
||||
|
||||
# Libraries from OS are only needed to test the "unbundled" build (that is not used in production).
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
|
8
docker/server/.dockerignore
Normal file
@ -0,0 +1,8 @@
|
||||
# post / preinstall scripts (not needed, we do it in Dockerfile)
|
||||
alpine-root/install/*
|
||||
|
||||
# docs (looks useless)
|
||||
alpine-root/usr/share/doc/*
|
||||
|
||||
# packages, etc. (used by prepare.sh)
|
||||
alpine-root/tgz-packages/*
|
1
docker/server/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
alpine-root/*
|
26
docker/server/Dockerfile.alpine
Normal file
@ -0,0 +1,26 @@
|
||||
FROM alpine
|
||||
|
||||
ENV LANG=en_US.UTF-8 \
|
||||
LANGUAGE=en_US:en \
|
||||
LC_ALL=en_US.UTF-8 \
|
||||
TZ=UTC \
|
||||
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
|
||||
|
||||
COPY alpine-root/ /
|
||||
|
||||
# from https://github.com/ClickHouse/ClickHouse/blob/master/debian/clickhouse-server.postinst
|
||||
RUN addgroup clickhouse \
|
||||
&& adduser -S -H -h /nonexistent -s /bin/false -G clickhouse -g "ClickHouse server" clickhouse \
|
||||
&& chown clickhouse:clickhouse /var/lib/clickhouse \
|
||||
&& chmod 700 /var/lib/clickhouse \
|
||||
&& chown root:clickhouse /var/log/clickhouse-server \
|
||||
&& chmod 775 /var/log/clickhouse-server \
|
||||
&& chmod +x /entrypoint.sh \
|
||||
&& apk add --no-cache su-exec
|
||||
|
||||
EXPOSE 9000 8123 9009
|
||||
|
||||
VOLUME /var/lib/clickhouse \
|
||||
/var/log/clickhouse-server
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
59
docker/server/alpine-build.sh
Executable file
@ -0,0 +1,59 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
|
||||
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
|
||||
VERSION="${VERSION:-20.9.3.45}"
|
||||
|
||||
# where original files live
|
||||
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
|
||||
|
||||
# we will create root for our image here
|
||||
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
|
||||
|
||||
# where to put downloaded tgz
|
||||
TGZ_PACKAGES_FOLDER="${CONTAINER_ROOT_FOLDER}/tgz-packages"
|
||||
|
||||
# clean up the root from old runs
|
||||
rm -rf "$CONTAINER_ROOT_FOLDER"
|
||||
|
||||
mkdir -p "$TGZ_PACKAGES_FOLDER"
|
||||
|
||||
PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
|
||||
|
||||
# download tars from the repo
|
||||
for package in "${PACKAGES[@]}"
|
||||
do
|
||||
wget -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
|
||||
done
|
||||
|
||||
# unpack tars
|
||||
for package in "${PACKAGES[@]}"
|
||||
do
|
||||
tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
|
||||
done
|
||||
|
||||
# prepare few more folders
|
||||
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
|
||||
"${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
|
||||
"${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/lib64"
|
||||
|
||||
cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
|
||||
cp "${DOCKER_BUILD_FOLDER}/entrypoint.alpine.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
|
||||
|
||||
## get glibc components from ubuntu 20.04 and put them to expected place
|
||||
docker pull ubuntu:20.04
|
||||
ubuntu20image=$(docker create --rm ubuntu:20.04)
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L ${ubuntu20image}:/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
|
||||
|
||||
docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "yandex/clickhouse-server:${VERSION}-alpine" --pull
|
152
docker/server/entrypoint.alpine.sh
Executable file
@ -0,0 +1,152 @@
|
||||
#!/bin/sh
|
||||
#set -x
|
||||
|
||||
DO_CHOWN=1
|
||||
if [ "$CLICKHOUSE_DO_NOT_CHOWN" = 1 ]; then
|
||||
DO_CHOWN=0
|
||||
fi
|
||||
|
||||
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
|
||||
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
|
||||
|
||||
# support --user
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
USER=$CLICKHOUSE_UID
|
||||
GROUP=$CLICKHOUSE_GID
|
||||
# busybox has setuidgid & chpst built in
|
||||
gosu="su-exec $USER:$GROUP"
|
||||
else
|
||||
USER="$(id -u)"
|
||||
GROUP="$(id -g)"
|
||||
gosu=""
|
||||
DO_CHOWN=0
|
||||
fi
|
||||
|
||||
# set some vars
|
||||
CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"
|
||||
|
||||
# port is needed to check if clickhouse-server is ready for connections
|
||||
HTTP_PORT="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=http_port)"
|
||||
|
||||
# get CH directories locations
|
||||
DATA_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=path || true)"
|
||||
TMP_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=tmp_path || true)"
|
||||
USER_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=user_files_path || true)"
|
||||
LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.log || true)"
|
||||
LOG_DIR="$(dirname $LOG_PATH || true)"
|
||||
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.errorlog || true)"
|
||||
ERROR_LOG_DIR="$(dirname $ERROR_LOG_PATH || true)"
|
||||
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=format_schema_path || true)"
|
||||
|
||||
CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
|
||||
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
|
||||
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
|
||||
|
||||
for dir in "$DATA_DIR" \
|
||||
"$ERROR_LOG_DIR" \
|
||||
"$LOG_DIR" \
|
||||
"$TMP_DIR" \
|
||||
"$USER_PATH" \
|
||||
"$FORMAT_SCHEMA_PATH"
|
||||
do
|
||||
# check if variable not empty
|
||||
[ -z "$dir" ] && continue
|
||||
# ensure directories exist
|
||||
if ! mkdir -p "$dir"; then
|
||||
echo "Couldn't create necessary directory: $dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$DO_CHOWN" = "1" ]; then
|
||||
# ensure proper directories permissions
|
||||
chown -R "$USER:$GROUP" "$dir"
|
||||
elif [ "$(stat -c %u "$dir")" != "$USER" ]; then
|
||||
echo "Necessary directory '$dir' isn't owned by user with id '$USER'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# if clickhouse user is defined - create it (user "default" already exists out of box)
|
||||
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
|
||||
echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'"
|
||||
cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
|
||||
<yandex>
|
||||
<!-- Docs: <https://clickhouse.tech/docs/en/operations/settings/settings_users/> -->
|
||||
<users>
|
||||
<!-- Remove default user -->
|
||||
<default remove="remove">
|
||||
</default>
|
||||
|
||||
<${CLICKHOUSE_USER}>
|
||||
<profile>default</profile>
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
<password>${CLICKHOUSE_PASSWORD}</password>
|
||||
<quota>default</quota>
|
||||
</${CLICKHOUSE_USER}>
|
||||
</users>
|
||||
</yandex>
|
||||
EOT
|
||||
fi
|
||||
|
||||
if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
|
||||
# Listen only on localhost until the initialization is done
|
||||
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
|
||||
pid="$!"
|
||||
|
||||
# check if clickhouse is ready to accept connections
|
||||
# will try to ping clickhouse via http_port (max 6 retries, with 1 sec timeout and 1 sec delay between retries)
|
||||
tries=6
|
||||
while ! wget --spider -T 1 -q "http://localhost:$HTTP_PORT/ping" 2>/dev/null; do
|
||||
if [ "$tries" -le "0" ]; then
|
||||
echo >&2 'ClickHouse init process failed.'
|
||||
exit 1
|
||||
fi
|
||||
tries=$(( tries-1 ))
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if [ ! -z "$CLICKHOUSE_PASSWORD" ]; then
|
||||
printf -v WITH_PASSWORD '%s %q' "--password" "$CLICKHOUSE_PASSWORD"
|
||||
fi
|
||||
|
||||
clickhouseclient="clickhouse-client --multiquery -u $CLICKHOUSE_USER $WITH_PASSWORD "
|
||||
|
||||
# create default database, if defined
|
||||
if [ -n "$CLICKHOUSE_DB" ]; then
|
||||
echo "$0: create database '$CLICKHOUSE_DB'"
|
||||
"$clickhouseclient" -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB";
|
||||
fi
|
||||
|
||||
for f in /docker-entrypoint-initdb.d/*; do
|
||||
case "$f" in
|
||||
*.sh)
|
||||
if [ -x "$f" ]; then
|
||||
echo "$0: running $f"
|
||||
"$f"
|
||||
else
|
||||
echo "$0: sourcing $f"
|
||||
. "$f"
|
||||
fi
|
||||
;;
|
||||
*.sql) echo "$0: running $f"; cat "$f" | "$clickhouseclient" ; echo ;;
|
||||
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "$clickhouseclient"; echo ;;
|
||||
*) echo "$0: ignoring $f" ;;
|
||||
esac
|
||||
echo
|
||||
done
|
||||
|
||||
if ! kill -s TERM "$pid" || ! wait "$pid"; then
|
||||
echo >&2 'Finishing of ClickHouse init process failed.'
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# if no args are passed to `docker run` or the first argument starts with `--`, then the user is passing clickhouse-server arguments
|
||||
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
||||
exec $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG "$@"
|
||||
fi
|
||||
|
||||
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
|
||||
exec "$@"
|
@ -82,6 +82,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
ENV COMMIT_SHA=''
|
||||
ENV PULL_REQUEST_NUMBER=''
|
||||
ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0
|
||||
|
||||
COPY run.sh /
|
||||
CMD ["/bin/bash", "/run.sh"]
|
||||
|
@ -172,6 +172,9 @@ function build
|
||||
(
|
||||
cd "$FASTTEST_BUILD"
|
||||
time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt"
|
||||
if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then
|
||||
cp programs/clickhouse "$FASTTEST_OUTPUT/clickhouse"
|
||||
fi
|
||||
ccache --show-stats ||:
|
||||
)
|
||||
}
|
||||
@ -268,7 +271,7 @@ TESTS_TO_SKIP=(
|
||||
00974_query_profiler
|
||||
|
||||
# Look at DistributedFilesToInsert, so cannot run in parallel.
|
||||
01457_DistributedFilesToInsert
|
||||
01460_DistributedFilesToInsert
|
||||
|
||||
01541_max_memory_usage_for_user
|
||||
|
||||
|
@ -63,7 +63,7 @@ function configure
|
||||
# Make copies of the original db for both servers. Use hardlinks instead
|
||||
# of copying to save space. Before that, remove preprocessed configs and
|
||||
# system tables, because sharing them between servers with hardlinks may
|
||||
# lead to weird effects.
rm -r left/db ||:
|
||||
rm -r right/db ||:
|
||||
rm -r db0/preprocessed_configs ||:
|
||||
@ -77,15 +77,12 @@ function restart
|
||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
||||
echo all killed
|
||||
|
||||
# Disable percpu arenas because they segfault when the process is bound to
|
||||
# a particular NUMA node: https://github.com/jemalloc/jemalloc/pull/1939
|
||||
#
|
||||
# About the jemalloc settings:
|
||||
# Change the jemalloc settings here.
|
||||
# https://github.com/jemalloc/jemalloc/wiki/Getting-Started
|
||||
export MALLOC_CONF="percpu_arena:disabled,confirm_conf:true"
|
||||
export MALLOC_CONF="confirm_conf:true"
|
||||
|
||||
set -m # Spawn servers in their own process groups
|
||||
|
||||
|
||||
left/clickhouse-server --config-file=left/config/config.xml \
|
||||
-- --path left/db --user_files_path left/db/user_files \
|
||||
&>> left-server-log.log &
|
||||
@ -211,7 +208,7 @@ function run_tests
|
||||
echo test "$test_name"
|
||||
|
||||
# Don't profile if we're past the time limit.
|
||||
# Use awk because bash doesn't support floating point arithmetic.
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
|
||||
|
||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||
@ -544,10 +541,10 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
|
||||
as select
|
||||
abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
|
||||
abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
|
||||
|
||||
|
||||
not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
|
||||
not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
|
||||
|
||||
|
||||
left, right, diff, stat_threshold,
|
||||
if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
|
||||
query_metric_stats.test test, query_metric_stats.query_index query_index,
|
||||
@ -770,7 +767,7 @@ create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
|
||||
-- The threshold for 2) is significantly larger than the threshold for 1), to
|
||||
-- avoid jitter.
|
||||
create view shortness
|
||||
as select
(test, query_index) in
|
||||
(select * from file('analyze/marked-short-queries.tsv', TSV,
|
||||
'test text, query_index int'))
|
||||
|
@ -17,14 +17,24 @@ service clickhouse-server start && sleep 5
|
||||
if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
|
||||
SKIP_LIST_OPT="--use-skip-list"
|
||||
fi
|
||||
# We can have several additional options, so we pass them as an array because
# it's more idiomatic.
|
||||
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
|
||||
|
||||
function run_tests()
|
||||
{
|
||||
# We can have several additional options, so we pass them as an array because
# it's more idiomatic.
|
||||
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
|
||||
|
||||
# Skip these tests, because they fail when we rerun them multiple times
|
||||
if [ "$NUM_TRIES" -gt "1" ]; then
|
||||
ADDITIONAL_OPTIONS+=('--skip')
|
||||
ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
|
||||
fi
|
||||
|
||||
for i in $(seq 1 $NUM_TRIES); do
|
||||
clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
|
||||
clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
|
||||
if [ ${PIPESTATUS[0]} -ne "0" ]; then
|
||||
break;
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
|
@ -35,7 +35,7 @@ RUN apt-get update \
|
||||
ENV TZ=Europe/Moscow
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN pip3 install urllib3 testflows==1.6.57 docker-compose docker dicttoxml kazoo tzlocal
|
||||
RUN pip3 install urllib3 testflows==1.6.59 docker-compose docker dicttoxml kazoo tzlocal
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
ENV DOCKER_VERSION 17.09.1-ce
|
||||
|
@ -18,4 +18,14 @@ toc_title: Cloud
|
||||
- Encryption and isolation
|
||||
- Automated maintenance
|
||||
|
||||
## Altinity.Cloud {#altinity.cloud}
|
||||
|
||||
[Altinity.Cloud](https://altinity.com/cloud-database/) is a fully managed ClickHouse-as-a-Service for the Amazon public cloud.
|
||||
- Fast deployment of ClickHouse clusters on Amazon resources
|
||||
- Easy scale-out/scale-in as well as vertical scaling of nodes
|
||||
- Isolated per-tenant VPCs with public endpoint or VPC peering
|
||||
- Configurable storage types and volume configurations
|
||||
- Cross-AZ scaling for performance and high availability
|
||||
- Built-in monitoring and SQL query editor
|
||||
|
||||
{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
|
||||
|
@ -51,7 +51,7 @@ Optional parameters:
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message.
- `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` – Total number of queues. Default: `1`. Increasing this number can significantly improve performance.
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to a dead letter exchange. By default, no dead letter exchange is specified.
- `rabbitmq_persistent` - If set to 1 (true), the delivery mode of an insert query will be set to 2 (marks messages as 'persistent'). Default: `0`.
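For context, a minimal sketch of a table that uses some of these settings. The required connection settings (`rabbitmq_host_port`, `rabbitmq_exchange_name`, `rabbitmq_format`) are not shown in this excerpt and are assumed here; all names and values are illustrative only:

``` sql
CREATE TABLE queue
(
    key UInt64,
    value String
) ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',   -- assumed required setting
         rabbitmq_exchange_name = 'exchange1',    -- assumed required setting
         rabbitmq_format = 'JSONEachRow',         -- assumed required setting
         rabbitmq_num_consumers = 4,              -- from the list above
         rabbitmq_num_queues = 2;                 -- from the list above
```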
@ -148,4 +148,5 @@ Example:
- `_channel_id` - ChannelID on which the consumer that received the message was declared.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - messageID of the received message; non-empty if it was set when the message was published.
- `_timestamp` - timestamp of the received message; non-empty if it was set when the message was published.
@ -30,4 +30,4 @@ Instead of inserting data manually, you might consider to use one of [client lib
|
||||
- `input_format_import_nested_json` allows inserting nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type.

!!! note "Note"
    Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the `CLI` interface.
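A hedged illustration of the nested-JSON setting mentioned above (the table, its `Nested` column, and the data are hypothetical):

``` sql
-- Hypothetical table with a Nested column to receive nested JSON objects.
CREATE TABLE visits
(
    UserID UInt64,
    Goals Nested(ID UInt32, EventTime DateTime)
) ENGINE = MergeTree ORDER BY UserID;

SET input_format_import_nested_json = 1;

INSERT INTO visits FORMAT JSONEachRow {"UserID": 1, "Goals": [{"ID": 1, "EventTime": "2020-01-01 00:00:00"}]}
```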
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 17
|
||||
toc_priority: 19
|
||||
toc_title: AMPLab Big Data Benchmark
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 19
|
||||
toc_priority: 18
|
||||
toc_title: Terabyte Click Logs from Criteo
|
||||
---
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
toc_folder_title: Example Datasets
|
||||
toc_priority: 15
|
||||
toc_priority: 14
|
||||
toc_title: Introduction
|
||||
---
|
||||
|
||||
@ -18,4 +18,4 @@ The list of documented datasets:
|
||||
- [New York Taxi Data](../../getting-started/example-datasets/nyc-taxi.md)
|
||||
- [OnTime](../../getting-started/example-datasets/ontime.md)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) <!--hide-->
|
||||
[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) <!--hide-->
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 14
|
||||
toc_priority: 15
|
||||
toc_title: Yandex.Metrica Data
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 16
|
||||
toc_priority: 20
|
||||
toc_title: New York Taxi Data
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 15
|
||||
toc_priority: 21
|
||||
toc_title: OnTime
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 20
|
||||
toc_priority: 16
|
||||
toc_title: Star Schema Benchmark
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 18
|
||||
toc_priority: 17
|
||||
toc_title: WikiStat
|
||||
---
|
||||
|
||||
|
@ -460,7 +460,7 @@ See also the [JSONEachRow](#jsoneachrow) format.
|
||||
|
||||
## JSONString {#jsonstring}
|
||||
|
||||
Differs from JSON only in that data fields are output in strings, not in typed JSON values.
|
||||
|
||||
Example:
|
||||
|
||||
@ -596,7 +596,7 @@ When inserting the data, you should provide a separate JSON value for each row.
|
||||
## JSONEachRowWithProgress {#jsoneachrowwithprogress}
|
||||
## JSONStringEachRowWithProgress {#jsonstringeachrowwithprogress}
|
||||
|
||||
Differs from `JSONEachRow`/`JSONStringEachRow` in that ClickHouse will also yield progress information as JSON values.
|
||||
|
||||
```json
|
||||
{"row":{"'hello'":"hello","multiply(42, number)":"0","range(5)":[0,1,2,3,4]}}
|
||||
@ -608,7 +608,7 @@ Differs from JSONEachRow/JSONStringEachRow in that ClickHouse will also yield pr
|
||||
## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes}
|
||||
## JSONCompactStringEachRowWithNamesAndTypes {#jsoncompactstringeachrowwithnamesandtypes}
|
||||
|
||||
Differs from `JSONCompactEachRow`/`JSONCompactStringEachRow` in that the column names and types are written as the first two rows.
|
||||
|
||||
```json
|
||||
["'hello'", "multiply(42, number)", "range(5)"]
|
||||
|
@ -6,7 +6,7 @@ toc_title: Client Libraries
|
||||
# Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers}
|
||||
|
||||
!!! warning "Disclaimer"
|
||||
Yandex does **not** maintain the libraries listed below and hasn’t done any extensive testing to ensure their quality.
|
||||
|
||||
- Python
|
||||
- [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
|
||||
|
69
docs/en/operations/opentelemetry.md
Normal file
@ -0,0 +1,69 @@
|
||||
---
|
||||
toc_priority: 62
|
||||
toc_title: OpenTelemetry Support
|
||||
---
|
||||
|
||||
# [experimental] OpenTelemetry Support
|
||||
|
||||
[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting
traces and metrics from a distributed application. ClickHouse has some support
for OpenTelemetry.
|
||||
|
||||
!!! warning "Warning"
|
||||
This is an experimental feature that will change in backward-incompatible ways in future releases.
|
||||
|
||||
|
||||
## Supplying Trace Context to ClickHouse
|
||||
|
||||
ClickHouse accepts trace context HTTP headers, as described by
|
||||
the [W3C recommendation](https://www.w3.org/TR/trace-context/).
|
||||
It also accepts trace context over native protocol that is used for
|
||||
communication between ClickHouse servers or between the client and server.
|
||||
For manual testing, trace context headers conforming to the Trace Context
|
||||
recommendation can be supplied to `clickhouse-client` using
|
||||
`--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.
|
||||
|
||||
If no parent trace context is supplied, ClickHouse can start a new trace, with
|
||||
probability controlled by the `opentelemetry_start_trace_probability` setting.
|
||||
|
||||
|
||||
## Propagating the Trace Context
|
||||
|
||||
The trace context is propagated to downstream services in the following cases:
|
||||
|
||||
* Queries to remote ClickHouse servers, such as when using `Distributed` table
|
||||
engine.
|
||||
|
||||
* `URL` table function. Trace context information is sent in HTTP headers.
|
||||
|
||||
|
||||
## Tracing ClickHouse Itself
|
||||
|
||||
ClickHouse creates _trace spans_ for each query and some of the query execution
|
||||
stages, such as query planning or distributed queries.
|
||||
|
||||
To be useful, the tracing information has to be exported to a monitoring system
|
||||
that supports OpenTelemetry, such as Jaeger or Prometheus. ClickHouse avoids
|
||||
a dependency on a particular monitoring system, instead only
|
||||
providing the tracing data conforming to the standard. A natural way to do so
|
||||
in an SQL RDBMS is a system table. OpenTelemetry trace span information
|
||||
[required by the standard](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/overview.md#span)
|
||||
is stored in the system table called `system.opentelemetry_span_log`.
|
||||
|
||||
The table must be enabled in the server configuration, see the `opentelemetry_span_log`
|
||||
element in the default config file `config.xml`. It is enabled by default.
|
||||
|
||||
The table has the following columns:
|
||||
|
||||
- `trace_id`
|
||||
- `span_id`
|
||||
- `parent_span_id`
|
||||
- `operation_name`
|
||||
- `start_time`
|
||||
- `finish_time`
|
||||
- `finish_date`
|
||||
- `attribute.name`
|
||||
- `attribute.values`
|
||||
|
||||
The tags or attributes are saved as two parallel arrays, containing the keys
|
||||
and values. Use `ARRAY JOIN` to work with them.
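A hedged sketch of such a query, using the column names listed above (the exact column names in the real table may differ; the ordering and limit are illustrative):

``` sql
SELECT
    operation_name,
    attr_name,
    attr_value
FROM system.opentelemetry_span_log
ARRAY JOIN `attribute.name` AS attr_name, `attribute.values` AS attr_value
ORDER BY start_time DESC
LIMIT 10
```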
|
@ -20,8 +20,8 @@ The `system.query_log` table registers two kinds of queries:
|
||||
|
||||
Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query:
|
||||
|
||||
1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created.
2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created.
|
||||
3. If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created.
|
||||
|
||||
Columns:
|
||||
@ -37,8 +37,8 @@ Columns:
|
||||
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
|
||||
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision.
|
||||
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
|
||||
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions that participated in the query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions that participated in the query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of rows read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value.
|
||||
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
|
||||
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
|
||||
- `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` query, or a number of rows in the `INSERT` query.
|
||||
|
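A hedged example of inspecting `system.query_log` using the columns described above (filter and limit are illustrative):

``` sql
SELECT
    type,
    query_start_time,
    query_duration_ms,
    read_rows,
    read_bytes
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY query_start_time DESC
LIMIT 5
```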
@ -1,6 +1,6 @@
|
||||
# system.query_thread_log {#system_tables-query_thread_log}
|
||||
|
||||
Contains information about threads which execute queries, for example, thread name, thread start time, duration of query processing.
|
||||
Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing.
|
||||
|
||||
To start logging:
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# system.text_log {#system_tables-text_log}
|
||||
|
||||
Contains logging entries. The logging level which goes to this table can be limited with the `text_log.level` server setting.
|
||||
|
||||
Columns:
|
||||
|
||||
|
@ -18,7 +18,7 @@ Columns:
|
||||
|
||||
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse server build revision.
|
||||
|
||||
When connecting to the server by `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server.
|
||||
|
||||
- `timer_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Timer type:
|
||||
|
||||
|
@ -80,4 +80,4 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu
|
||||
## See Also {#see-also}
|
||||
|
||||
- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator
|
||||
- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions
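For reference, a small hedged illustration of the conversion functions referenced above (values are arbitrary):

``` sql
SELECT now() AS current_date_time, current_date_time + toIntervalDay(4) AS four_days_later
```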
|
||||
|
@ -6,7 +6,7 @@ toc_title: Encoding
|
||||
# Encoding Functions {#encoding-functions}
|
||||
|
||||
## char {#char}
|
||||
|
||||
|
||||
Returns a string with length equal to the number of passed arguments, where each byte has the value of the corresponding argument. Accepts multiple arguments of numeric types. If the value of an argument is out of the range of the UInt8 data type, it is converted to UInt8 with possible rounding and overflow.
|
||||
|
||||
**Syntax**
|
||||
|
@ -551,7 +551,7 @@ formatReadableTimeDelta(column[, maximum_unit])
|
||||
**Parameters**
|
||||
|
||||
- `column` — A column with numeric time delta.
|
||||
- `maximum_unit` — Optional. Maximum unit to show. Acceptable values: seconds, minutes, hours, days, months, years.
|
||||
|
||||
Example:
|
||||
|
||||
@ -626,7 +626,12 @@ neighbor(column, offset[, default_value])
|
||||
```
|
||||
|
||||
The result of the function depends on the affected data blocks and the order of data in the block.

!!! warning "Warning"
    It can reach the neighbor rows only inside the currently processed data block.

The order of rows used during the calculation of `neighbor` can differ from the order of rows returned to the user.
To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
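A hedged sketch of the subquery pattern described above (the data is illustrative):

``` sql
SELECT number, neighbor(number, 1, -1) AS next_number
FROM
(
    SELECT number
    FROM numbers(5)
    ORDER BY number
)
```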
|
||||
|
||||
**Parameters**
|
||||
|
||||
@ -731,8 +736,13 @@ Result:
|
||||
Calculates the difference between successive row values in the data block.
|
||||
Returns 0 for the first row and the difference from the previous row for each subsequent row.
|
||||
|
||||
!!! warning "Warning"
|
||||
It can reach the previos row only inside the currently processed data block.
|
||||
|
||||
The result of the function depends on the affected data blocks and the order of data in the block.
|
||||
If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
|
||||
|
||||
The rows order used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
|
||||
To prevent that you can make a subquery with ORDER BY and call the function from outside the subquery.
|
||||
|
||||
Example:
|
||||
|
||||
@ -1584,7 +1594,7 @@ isDecimalOverflow(d, [p])
|
||||
**Parameters**
|
||||
|
||||
- `d` — value. [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
- `p` — precision. Optional. If omitted, the initial precision of the first argument is used. Using this parameter could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).
|
||||
|
||||
**Returned values**
|
||||
|
||||
|
@ -61,6 +61,54 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toUUIDOrNull (x) {#touuidornull-x}
|
||||
|
||||
It takes an argument of type String and tries to parse it into a UUID. If it fails, it returns NULL.
|
||||
|
||||
``` sql
|
||||
toUUIDOrNull(String)
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
The Nullable(UUID) type value.
|
||||
|
||||
**Usage example**
|
||||
|
||||
``` sql
|
||||
SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─uuid─┐
|
||||
│ ᴺᵁᴸᴸ │
|
||||
└──────┘
|
||||
```
|
||||
|
||||
## toUUIDOrZero (x) {#touuidorzero-x}
|
||||
|
||||
It takes an argument of type String and tries to parse it into a UUID. If it fails, it returns a zero UUID.
|
||||
|
||||
``` sql
|
||||
toUUIDOrZero(String)
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
The UUID type value.
|
||||
|
||||
**Usage example**
|
||||
|
||||
``` sql
|
||||
SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─────────────────────────────────uuid─┐
|
||||
│ 00000000-0000-0000-0000-000000000000 │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## UUIDStringToNum {#uuidstringtonum}
|
||||
|
||||
Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
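The excerpt cuts off before the usage example; a hedged sketch (the UUID literal is arbitrary):

``` sql
SELECT
    '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid,
    UUIDStringToNum(uuid) AS bytes
```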
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 37
|
||||
toc_priority: 38
|
||||
toc_title: Operators
|
||||
---
|
||||
|
||||
@ -169,7 +169,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL
|
||||
**See Also**
|
||||
|
||||
- [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type
|
||||
- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions
|
||||
|
||||
## Logical Negation Operator {#logical-negation-operator}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 36
|
||||
toc_priority: 35
|
||||
toc_title: ALTER
|
||||
---
|
||||
|
||||
|
@ -5,16 +5,16 @@ toc_title: SAMPLE BY
|
||||
|
||||
# Manipulating Sampling-Key Expressions {#manipulations-with-sampling-key-expressions}
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression
|
||||
```
|
||||
|
||||
The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions).
|
||||
|
||||
The command is lightweight in the sense that it only changes metadata. The primary key must contain the new sample key.

!!! note "Note"
    It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including
    [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
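A hedged usage sketch of the command described above (the table and expression are hypothetical; the new expression must already be part of the primary key):

``` sql
ALTER TABLE hits MODIFY SAMPLE BY intHash32(UserID);
```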
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 42
|
||||
toc_priority: 40
|
||||
toc_title: ATTACH
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 43
|
||||
toc_priority: 41
|
||||
toc_title: CHECK
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 1
|
||||
toc_priority: 35
|
||||
toc_title: DATABASE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 4
|
||||
toc_priority: 38
|
||||
toc_title: DICTIONARY
|
||||
---
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
toc_folder_title: CREATE
|
||||
toc_priority: 35
|
||||
toc_priority: 34
|
||||
toc_title: Overview
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 8
|
||||
toc_priority: 42
|
||||
toc_title: QUOTA
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 6
|
||||
toc_priority: 40
|
||||
toc_title: ROLE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 7
|
||||
toc_priority: 41
|
||||
toc_title: ROW POLICY
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 9
|
||||
toc_priority: 43
|
||||
toc_title: SETTINGS PROFILE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 2
|
||||
toc_priority: 36
|
||||
toc_title: TABLE
|
||||
---
|
||||
|
||||
@ -121,7 +121,7 @@ Defines storage time for values. Can be specified only for MergeTree-family tabl
|
||||
|
||||
## Column Compression Codecs {#codecs}
|
||||
|
||||
By default, ClickHouse applies the `lz4` compression method. For `MergeTree`-engine family you can change the default compression method in the [compression](../../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration.
|
||||
|
||||
You can also define the compression method for each individual column in the `CREATE TABLE` query.
|
||||
|
||||
@ -138,7 +138,7 @@ ENGINE = <Engine>
|
||||
...
|
||||
```
|
||||
|
||||
The `Default` codec can be specified to reference default compression which may depend on different settings (and properties of data) in runtime.
|
||||
Example: `value UInt64 CODEC(Default)` — the same as lack of codec specification.
|
||||
|
||||
Also you can remove current CODEC from the column and use default compression from config.xml:
|
||||
@ -149,7 +149,7 @@ ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
|
||||
|
||||
Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`.
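As a hedged illustration of such a pipeline in a column definition (the table and column names are hypothetical):

``` sql
CREATE TABLE codec_pipeline_example
(
    dt DateTime CODEC(Delta, Default),
    value Float64 CODEC(Gorilla, ZSTD)
)
ENGINE = MergeTree
ORDER BY dt;
```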
|
||||
|
||||
To select the best codec combination for your project, run benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. One thing to note is that a codec can't be applied to an ALIAS column type.
|
||||
|
||||
!!! warning "Warning"
|
||||
You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 5
|
||||
toc_priority: 39
|
||||
toc_title: USER
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 3
|
||||
toc_priority: 37
|
||||
toc_title: VIEW
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 44
|
||||
toc_priority: 42
|
||||
toc_title: DESCRIBE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 45
|
||||
toc_priority: 43
|
||||
toc_title: DETACH
|
||||
---
|
||||
|
||||
|
@ -1,88 +1,100 @@
|
||||
---
|
||||
toc_priority: 46
|
||||
toc_priority: 44
|
||||
toc_title: DROP
|
||||
---
|
||||
|
||||
# DROP Statements {#drop}
|
||||
|
||||
Deletes an existing entity. If the `IF EXISTS` clause is specified, these queries don’t return an error if the entity doesn’t exist.
|
||||
|
||||
## DROP DATABASE {#drop-database}
|
||||
|
||||
Deletes all tables inside the `db` database, then deletes the `db` database itself.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP TABLE {#drop-table}
|
||||
|
||||
Deletes the table.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP DICTIONARY {#drop-dictionary}
|
||||
|
||||
Deletes the dictionary.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP DICTIONARY [IF EXISTS] [db.]name
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP USER {#drop-user-statement}
|
||||
|
||||
Deletes a user.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP ROLE {#drop-role-statement}
|
||||
|
||||
Deletes a role. The deleted role is revoked from all the entities where it was assigned.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP ROW POLICY {#drop-row-policy-statement}
|
||||
|
||||
Deletes a row policy. Deleted row policy is revoked from all the entities where it was assigned.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP QUOTA {#drop-quota-statement}
|
||||
|
||||
Deletes a quota. The deleted quota is revoked from all the entities where it was assigned.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP SETTINGS PROFILE {#drop-settings-profile-statement}
|
||||
|
||||
Deletes a settings profile. The deleted settings profile is revoked from all the entities where it was assigned.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
|
||||
|
||||
## DROP VIEW {#drop-view}
|
||||
|
||||
Deletes a view. Views can be deleted by a `DROP TABLE` command as well but `DROP VIEW` checks that `[db.]name` is a view.
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
|
||||
[Оriginal article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) <!--hide-->
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 47
|
||||
toc_priority: 45
|
||||
toc_title: EXISTS
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 39
|
||||
toc_priority: 38
|
||||
toc_title: GRANT
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 34
|
||||
toc_priority: 33
|
||||
toc_title: INSERT INTO
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 48
|
||||
toc_priority: 46
|
||||
toc_title: KILL
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 49
|
||||
toc_priority: 47
|
||||
toc_title: OPTIMIZE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 50
|
||||
toc_priority: 48
|
||||
toc_title: RENAME
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 40
|
||||
toc_priority: 39
|
||||
toc_title: REVOKE
|
||||
---
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
title: SELECT Query
|
||||
toc_folder_title: SELECT
|
||||
toc_priority: 33
|
||||
toc_priority: 32
|
||||
toc_title: Overview
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 52
|
||||
toc_priority: 51
|
||||
toc_title: SET ROLE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 51
|
||||
toc_priority: 49
|
||||
toc_title: SET
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 38
|
||||
toc_priority: 37
|
||||
toc_title: SHOW
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 37
|
||||
toc_priority: 36
|
||||
toc_title: SYSTEM
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 53
|
||||
toc_priority: 52
|
||||
toc_title: TRUNCATE
|
||||
---
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
toc_priority: 54
|
||||
toc_priority: 53
|
||||
toc_title: USE
|
||||
---
|
||||
|
||||
|
@ -1,3 +1,8 @@
|
||||
---
|
||||
toc_priority: 1
|
||||
toc_title: "\u041f\u043e\u0441\u0442\u0430\u0432\u0449\u0438\u043a\u0438\u0020\u043e\u0431\u043b\u0430\u0447\u043d\u044b\u0445\u0020\u0443\u0441\u043b\u0443\u0433\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065"
|
||||
---
|
||||
|
||||
# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}

!!! info "Info"
|
||||
|
@ -1,3 +1,8 @@
|
||||
---
|
||||
toc_priority: 62
|
||||
toc_title: "\u041e\u0431\u0437\u043e\u0440\u0020\u0430\u0440\u0445\u0438\u0442\u0435\u043a\u0442\u0443\u0440\u044b\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065"
|
||||
---
|
||||
|
||||
# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}

ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during query processing it is stored in arrays (vectors or chunks of columns). Whenever possible, operations are performed on arrays rather than on individual values. This is called “vectorized query execution”, and it helps lower the cost of actual data processing.
|
||||
|
@ -1,3 +1,9 @@
---
toc_priority: 71
toc_title: "\u041d\u0430\u0432\u0438\u0433\u0430\u0446\u0438\u044f\u0020\u043f\u043e\u0020\u043a\u043e\u0434\u0443\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065"
---

# Navigating the ClickHouse Code {#navigatsiia-po-kodu-clickhouse}

For browsing the code online, **Woboq** is available [here](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). It provides convenient navigation between source files, semantic highlighting, tooltips, indexing, and search. The code snapshot is updated daily.
@ -1,3 +1,9 @@
---
toc_priority: 70
toc_title: "\u0418\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u043c\u044b\u0435\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0435\u0020\u0431\u0438\u0431\u043b\u0438\u043e\u0442\u0435\u043a\u0438"
---

# Third-Party Libraries Used {#ispolzuemye-storonnie-biblioteki}

| Library | License |
@ -1,3 +1,8 @@
---
toc_priority: 61
toc_title: "\u0418\u043d\u0441\u0442\u0440\u0443\u043a\u0446\u0438\u044f\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432"
---

# Developer Instructions

Building ClickHouse is supported on Linux, FreeBSD, and Mac OS X.
@ -1,3 +1,9 @@
---
toc_priority: 68
toc_title: "\u041a\u0430\u043a\u0020\u043f\u0438\u0441\u0430\u0442\u044c\u0020\u043a\u043e\u0434\u0020\u043d\u0430\u0020\u0043\u002b\u002b"
---

# How to Write C++ Code {#kak-pisat-kod-na-c}

## General {#obshchee}
@ -1,3 +1,10 @@
---
toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0431\u0430\u0437\u0020\u0434\u0430\u043d\u043d\u044b\u0445"
toc_priority: 27
toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
---

# Database Engines {#dvizhki-baz-dannykh}

Database engines provide support for working with tables.
@ -1,3 +1,8 @@
---
toc_priority: 31
toc_title: Lazy
---

# Lazy {#lazy}

Keeps tables in RAM only for `expiration_time_in_seconds` seconds after the last access. Can be used only with \*Log tables.
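
A minimal sketch of creating such a database; the database name `testlazy` and the 120-second expiration value are illustrative:

``` sql
CREATE DATABASE testlazy ENGINE = Lazy(120);
```
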
@ -1,3 +1,8 @@
---
toc_priority: 30
toc_title: MySQL
---

# MySQL {#mysql}

Allows connecting to databases on a remote MySQL server and running `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
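
A sketch of attaching a remote MySQL database; the host, database name, and credentials are placeholders:

``` sql
CREATE DATABASE mysql_db ENGINE = MySQL('host:3306', 'database', 'user', 'password');
```
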
@ -1,6 +1,6 @@
---
toc_folder_title: "\u0414\u0432\u0438\u0436\u043A\u0438"
toc_folder_title: "\u0045\u006e\u0067\u0069\u006e\u0065\u0073"
toc_hidden: true
toc_priority: 25
toc_title: hidden
---
@ -1,3 +1,10 @@
---
toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0442\u0430\u0431\u043b\u0438\u0446"
toc_priority: 26
toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
---

# Table Engines {#table_engines}

The table engine (type of table) determines:
@ -1,3 +1,8 @@
---
toc_priority: 4
toc_title: HDFS
---

# HDFS {#table_engines-hdfs}

Manages data in HDFS. This engine is similar to the [File](../special/file.md#table_engines-file) and [URL](../special/url.md#table_engines-url) engines.
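
A sketch of a table backed by a file in HDFS; the URI, file path, and column set are placeholders:

``` sql
CREATE TABLE hdfs_table (name String, value UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/some_dir/some_file', 'TSV');
```
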
@ -1,5 +1,5 @@
---
toc_folder_title: Integrations
toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0442\u0430\u0431\u043b\u0438\u0446\u0020\u0434\u043b\u044f\u0020\u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438"
toc_priority: 30
---
@ -1,3 +1,8 @@
---
toc_priority: 2
toc_title: JDBC
---

# JDBC {#table-engine-jdbc}

Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
@ -1,3 +1,8 @@
---
toc_priority: 5
toc_title: Kafka
---

# Kafka {#kafka}

This engine works with [Apache Kafka](http://kafka.apache.org/).
@ -1,3 +1,8 @@
---
toc_priority: 3
toc_title: MySQL
---

# MySQL {#mysql}

The MySQL engine allows you to run `SELECT` queries on data stored on a remote MySQL server.
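
A sketch of mapping a single remote MySQL table; all connection parameters and column names are placeholders:

``` sql
CREATE TABLE mysql_table (id UInt32, name String)
ENGINE = MySQL('host:3306', 'database', 'table', 'user', 'password');
```
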
@ -1,3 +1,8 @@
---
toc_priority: 1
toc_title: ODBC
---

# ODBC {#table-engine-odbc}

Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
@ -45,7 +45,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
- `rabbitmq_row_delimiter` – the delimiter character that terminates a message.
- `rabbitmq_schema` – an optional parameter that is required if the format needs a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root object `schema.capnp:Message`.
- `rabbitmq_num_consumers` – the number of consumers per table. Default: `1`. Specify more consumers if the throughput of a single consumer is insufficient.
- `rabbitmq_num_queues` – the number of queues per consumer. Default: `1`. Specify more queues if the throughput of a single queue per consumer is insufficient.
- `rabbitmq_num_queues` – the number of queues. Default: `1`. A larger number of queues can significantly increase throughput.
- `rabbitmq_queue_base` – a setting for queue names. Usage scenarios are described below.
- `rabbitmq_persistent` – a flag that controls the 'durable' setting for messages in `INSERT` queries. Default: `0`.
- `rabbitmq_skip_broken_messages` – the maximum number of broken messages per block. If `rabbitmq_skip_broken_messages = N`, the engine discards `N` messages that could not be parsed. One message corresponds to exactly one record (row). Default: 0.
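
A hedged configuration sketch combining a few of the settings above; the host, exchange name, and column set are illustrative:

``` sql
CREATE TABLE rabbitmq_queue (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_num_consumers = 2;
```
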
@ -140,4 +140,5 @@ Example:
- `_channel_id` – the `ChannelID` of the channel on which the message was received.
- `_delivery_tag` – the `DeliveryTag` of the received message. Unique within one channel.
- `_redelivered` – the `redelivered` flag. (Non-zero if there is a chance that the message was received by more than one channel.)
- `_message_id` – the `MessageID` of the received message. This field is non-empty if it was set when the message was published.
- `_message_id` – the value of the `messageID` field of the received message. This field is non-empty if it was set when the message was published.
- `_timestamp` – the value of the `timestamp` field of the received message. This field is non-empty if it was set when the message was published.
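
A small sketch of reading these virtual columns alongside regular columns, typically as the SELECT part of a materialized view; the table name `rabbitmq_queue` and the columns `key`, `value` are assumptions carried over from the sketch above:

``` sql
SELECT key, value, _channel_id, _delivery_tag, _redelivered, _message_id, _timestamp
FROM rabbitmq_queue;
```
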
@ -1,6 +1,6 @@
---
toc_folder_title: Семейство Log
toc_title: Введение
toc_folder_title: "\u0421\u0435\u043c\u0435\u0439\u0441\u0442\u0432\u043e\u0020\u004c\u006f\u0067"
toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
toc_priority: 29
---
@ -1,3 +1,8 @@
---
toc_priority: 33
toc_title: Log
---

# Log {#log}

The engine belongs to the family of Log engines. See the common properties of Log engines and their differences in the [Log Family](index.md) article.
@ -1,3 +1,8 @@
---
toc_priority: 32
toc_title: StripeLog
---

# StripeLog {#stripelog}

The engine belongs to the family of Log engines. See the common properties of Log engines and their differences in the [Log Family](index.md) article.
@ -1,3 +1,8 @@
---
toc_priority: 34
toc_title: TinyLog
---

# TinyLog {#tinylog}

The engine belongs to the family of Log engines. See the common properties of Log engines and their differences in the [Log Family](index.md) article.
@ -1,3 +1,8 @@
---
toc_priority: 35
toc_title: AggregatingMergeTree
---

# AggregatingMergeTree {#aggregatingmergetree}

The engine inherits the functionality of [MergeTree](mergetree.md#table_engines-mergetree), changing the logic for merging data parts. ClickHouse replaces all rows with the same primary key (more precisely, with the same [sorting key](mergetree.md)) with a single row (within one data part) that stores a combination of states of aggregate functions.
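
A minimal sketch of a table using this engine; the table name and column set are illustrative:

``` sql
CREATE TABLE agg_table
(
    key UInt64,
    total AggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY key;
```
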
@ -1,3 +1,8 @@
---
toc_priority: 36
toc_title: CollapsingMergeTree
---

# CollapsingMergeTree {#table_engine-collapsingmergetree}

The engine inherits the functionality of [MergeTree](mergetree.md) and adds the logic of collapsing (deleting) rows to the data parts merge algorithm.
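
A minimal sketch; the `sign` column (an `Int8` with +1 for a "state" row and -1 for a "cancel" row) is the engine parameter, while the table name and other columns are illustrative:

``` sql
CREATE TABLE collapsing_table
(
    key UInt64,
    value UInt64,
    sign Int8
)
ENGINE = CollapsingMergeTree(sign)
ORDER BY key;
```
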
@ -1,3 +1,9 @@
---
toc_priority: 32
toc_title: "\u041f\u0440\u043e\u0438\u0437\u0432\u043e\u043b\u044c\u043d\u044b\u0439\u0020\u043a\u043b\u044e\u0447\u0020\u043f\u0430\u0440\u0442\u0438\u0446\u0438\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f"
---

# Custom Partitioning Key {#proizvolnyi-kliuch-partitsionirovaniia}

Data partitioning is available for tables of the [MergeTree](mergetree.md) family (including [replicated tables](replication.md)). [MaterializedView](../special/materializedview.md#materializedview) tables created on top of MergeTree tables also support partitioning.
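
A small sketch of specifying a partitioning expression in a table definition; the table and column names are illustrative:

``` sql
CREATE TABLE visits
(
    VisitDate Date,
    CounterID UInt32,
    Hits UInt32
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(VisitDate)
ORDER BY (CounterID, VisitDate);
```
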
@ -1,3 +1,8 @@
---
toc_priority: 38
toc_title: GraphiteMergeTree
---

# GraphiteMergeTree {#graphitemergetree}

The engine is designed for thinning and aggregating/averaging (rollup) of [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be of interest to developers who want to use ClickHouse as a data store for Graphite.
@ -1,6 +1,5 @@
---
toc_folder_title: MergeTree Family
toc_priority: 28
toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
---
Some files were not shown because too many files have changed in this diff.