Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-20 14:42:02 +00:00

Commit 61deae7105: Merge branch 'master' into grouping-sets-fix
@@ -37,6 +37,7 @@ Checks: '-*,
    readability-avoid-const-params-in-decls,
    readability-const-return-type,
    readability-container-contains,
    readability-container-size-empty,
    readability-convert-member-functions-to-static,
    readability-delete-null-pointer,
.github/workflows/debug.yml (2 changes, vendored)

@@ -2,7 +2,7 @@
name: Debug
'on':
-  [push, pull_request, release]
+  [push, pull_request, release, workflow_dispatch]
jobs:
  DebugInfo:
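For reference, once `workflow_dispatch` is in the trigger list the workflow can also be started by hand. A minimal sketch, assuming an authenticated GitHub CLI (the ref name is illustrative):

```bash
# Trigger the Debug workflow manually instead of waiting for a push/PR/release.
gh workflow run debug.yml --repo ClickHouse/ClickHouse --ref master
# Follow the run that was just started:
gh run watch
```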
.github/workflows/docs_check.yml (2 changes, vendored)

@@ -94,7 +94,7 @@ jobs:
          path: ${{ runner.temp }}/changed_images.json
  DocsCheck:
    needs: DockerHubPush
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
.github/workflows/master.yml (27 changes, vendored)

@@ -979,15 +979,14 @@ jobs:
############################################################################################
  BuilderReport:
    needs:
      - BuilderDebRelease
      - BuilderDebAarch64
      - BuilderBinRelease
      - BuilderBinGCC
      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebDebug
      - BuilderDebMsan
      - BuilderDebRelease
      - BuilderDebTsan
      - BuilderDebUBsan
      - BuilderDebMsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set envs
@@ -1026,17 +1025,23 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
  BuilderSpecialReport:
    needs:
      - BuilderBinAarch64
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
      - BuilderBinFreeBSD
      - BuilderBinGCC
      - BuilderBinPPC64
      - BuilderBinTidy
      - BuilderDebSplitted
      - BuilderBinDarwin
      - BuilderBinAarch64
      - BuilderBinFreeBSD
      - BuilderBinDarwinAarch64
      - BuilderBinPPC64
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/report_check
          REPORTS_PATH=${{runner.temp}}/reports_dir
@@ -1056,7 +1061,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
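The new DEPENDENCIES step leans on `jq '. | length'`, which returns the number of keys of a JSON object. A standalone sketch of what the heredoc computes, with an illustrative stand-in for `${{ toJSON(needs) }}` (the job names below are made up):

```bash
#!/usr/bin/env bash
# The object below stands in for ${{ toJSON(needs) }}; job names are illustrative.
DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
{
  "BuilderBinDarwin": { "result": "success" },
  "BuilderBinFreeBSD": { "result": "success" }
}
EOF
)
echo "DEPENDENCIES=$DEPENDENCIES"   # -> DEPENDENCIES=2, the count of needed jobs
```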
.github/workflows/pull_request.yml (25 changes, vendored)

@@ -1036,15 +1036,14 @@ jobs:
############################################################################################
  BuilderReport:
    needs:
      - BuilderDebRelease
      - BuilderDebAarch64
      - BuilderBinRelease
      - BuilderBinGCC
      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebDebug
      - BuilderDebMsan
      - BuilderDebRelease
      - BuilderDebTsan
      - BuilderDebUBsan
      - BuilderDebMsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
    if: ${{ success() || failure() }}
    steps:
@@ -1083,18 +1082,24 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
  BuilderSpecialReport:
    needs:
      - BuilderDebSplitted
      - BuilderBinTidy
      - BuilderBinDarwin
      - BuilderBinAarch64
      - BuilderBinFreeBSD
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
      - BuilderBinFreeBSD
      - BuilderBinGCC
      - BuilderBinPPC64
      - BuilderBinTidy
      - BuilderDebSplitted
    runs-on: [self-hosted, style-checker]
    if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/report_check
          REPORTS_PATH=${{runner.temp}}/reports_dir
@@ -1114,7 +1119,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
@@ -314,6 +314,15 @@ if (ENABLE_BUILD_PATH_MAPPING)
    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
endif ()

+option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
+if (ENABLE_BUILD_PROFILING)
+    if (COMPILER_CLANG)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
+    else ()
+        message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
+    endif ()
+endif ()
+
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
    # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
    # We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.
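The new ENABLE_BUILD_PROFILING option wires Clang's -ftime-trace into COMPILER_FLAGS. A sketch of how it might be used; the build directory, generator, and target name are illustrative:

```bash
mkdir -p build && cd build
cmake -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
      -DENABLE_BUILD_PROFILING=ON ..
ninja clickhouse   # target name assumed for illustration
# With -ftime-trace, each translation unit emits a Chrome-trace JSON next to
# its object file; open one in chrome://tracing (or speedscope) to see where
# compile time goes.
find . -path '*CMakeFiles*' -name '*.json' | head
```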
@@ -14,8 +14,8 @@

uint64_t getAvailableMemoryAmountOrZero()
{
-#if defined(_SC_AVPHYS_PAGES) // linux
-    return getPageSize() * sysconf(_SC_AVPHYS_PAGES);
+#if defined(_SC_PHYS_PAGES) // linux
+    return getPageSize() * sysconf(_SC_PHYS_PAGES);
#elif defined(__FreeBSD__)
    struct vmtotal vmt;
    size_t vmt_size = sizeof(vmt);
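The hunk swaps `_SC_AVPHYS_PAGES` (pages currently free) for `_SC_PHYS_PAGES` (pages installed), so the function now reports total rather than momentarily-available memory. The difference is easy to see from a shell on Linux with glibc's getconf:

```bash
# _SC_AVPHYS_PAGES fluctuates with load; _SC_PHYS_PAGES is stable.
echo "free:  $(( $(getconf _AVPHYS_PAGES) * $(getconf PAGESIZE) )) bytes"
echo "total: $(( $(getconf _PHYS_PAGES)   * $(getconf PAGESIZE) )) bytes"
```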
@@ -2,7 +2,7 @@
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif

-/// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex.
+/// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/

#include <base/defines.h>
@@ -1,6 +1,6 @@
#pragma once

-/// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex.
+/// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/

/** Collects all dl_phdr_info items and caches them in a static array.
  * Also rewrites dl_iterate_phdr with a lock-free version which consults the above cache
@@ -76,10 +76,10 @@ public:
    /// return none if daemon doesn't exist, reference to the daemon otherwise
    static std::optional<std::reference_wrapper<BaseDaemon>> tryGetInstance() { return tryGetInstance<BaseDaemon>(); }

-    /// В Graphite компоненты пути(папки) разделяются точкой.
-    /// У нас принят путь формата root_path.hostname_yandex_ru.key
-    /// root_path по умолчанию one_min
-    /// key - лучше группировать по смыслу. Например "meminfo.cached" или "meminfo.free", "meminfo.total"
+    /// Graphite metric name has components separated by dots.
+    /// We used to have the following format: root_path.hostname_clickhouse_com.key
+    /// root_path - one_min by default
+    /// key - something that makes sense. Examples: "meminfo.cached" or "meminfo.free", "meminfo.total".
    template <class T>
    void writeToGraphite(const std::string & key, const T & value, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
    {
@@ -155,7 +155,12 @@ target_include_directories(_jemalloc SYSTEM PRIVATE
target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE)

if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
-    target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_DEBUG=1)
+    target_compile_definitions(_jemalloc PRIVATE
+        -DJEMALLOC_DEBUG=1
+        # Usage examples:
+        # - MALLOC_CONF=log:.
+        # - MALLOC_CONF='log:core.malloc.exit|core.sallocx.entry|core.sdallocx.entry'
+        -DJEMALLOC_LOG=1)
endif ()

target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
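With JEMALLOC_LOG compiled in for debug builds, the examples from the comment can be exercised at run time. A sketch (the binary name is illustrative, and the variable has no effect on builds compiled without JEMALLOC_LOG):

```bash
MALLOC_CONF=log:. ./clickhouse server                       # log all events
MALLOC_CONF='log:core.malloc.exit|core.sallocx.entry|core.sdallocx.entry' \
    ./clickhouse server                                     # log selected events
```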
@@ -5,6 +5,6 @@ set(SRCS
    "${LIBRARY_DIR}/src/RdrLemmatizer.cpp"
)

-add_library(_lemmagen STATIC ${SRCS})
+add_library(_lemmagen ${SRCS})
target_include_directories(_lemmagen SYSTEM PUBLIC "${LEMMAGEN_INCLUDE_DIR}")
add_library(ch_contrib::lemmagen ALIAS _lemmagen)
@@ -27,6 +27,6 @@ FOREACH ( LINE ${_CONTENT} )
endforeach ()

# all the sources parsed. Now just add the lib
-add_library(_stemmer STATIC ${_SOURCES} ${_HEADERS} )
+add_library(_stemmer ${_SOURCES} ${_HEADERS} )
target_include_directories(_stemmer SYSTEM PUBLIC "${STEMMER_INCLUDE_DIR}")
add_library(ch_contrib::stemmer ALIAS _stemmer)
@@ -239,7 +239,7 @@ endif()
set(LIBMARIADB_SOURCES ${LIBMARIADB_SOURCES} ${CC_SOURCE_DIR}/libmariadb/mariadb_async.c ${CC_SOURCE_DIR}/libmariadb/ma_context.c)


-add_library(_mariadbclient STATIC ${LIBMARIADB_SOURCES})
+add_library(_mariadbclient ${LIBMARIADB_SOURCES})
target_link_libraries(_mariadbclient ${SYSTEM_LIBS})

target_include_directories(_mariadbclient PRIVATE ${CC_BINARY_DIR}/include-private)
@@ -539,7 +539,7 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
    "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp")
endif()

-add_library(_rocksdb STATIC ${SOURCES})
+add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
# SYSTEM is required to overcome some issues
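Dropping the explicit STATIC keyword from these contrib targets means `add_library` now falls back to CMake's standard BUILD_SHARED_LIBS switch instead of forcing a static archive. A sketch of the effect at configure time:

```bash
cmake -DBUILD_SHARED_LIBS=OFF ..   # default: the targets stay static, as before
cmake -DBUILD_SHARED_LIBS=ON ..    # the same targets are now built as shared libraries
```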
contrib/sysroot (2 changes, vendored)

@@ -1 +1 @@
-Subproject commit bbcac834526d90d1e764164b861be426891d1743
+Subproject commit e9fb375d0a1e5ebfd74c043f088f2342552103f8
debian/.gitignore (18 lines, vendored, deleted)

@@ -1,18 +0,0 @@
control
copyright
tmp/
clickhouse-benchmark/
clickhouse-client.docs
clickhouse-client/
clickhouse-common-static-dbg/
clickhouse-common-static.docs
clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
debhelper-build-stamp
files
*.debhelper.log
*.debhelper
*.substvars
debian/.pbuilderrc (223 lines, vendored, deleted)

@@ -1,223 +0,0 @@
#
# sudo apt install pbuilder fakeroot debhelper debian-archive-keyring debian-keyring
#
# ubuntu:
# prepare old (trusty or earlier) host system:

# sudo ln -s gutsy /usr/share/debootstrap/scripts/eoan
# sudo ln -s gutsy /usr/share/debootstrap/scripts/disco
# sudo ln -s gutsy /usr/share/debootstrap/scripts/cosmic
# sudo ln -s gutsy /usr/share/debootstrap/scripts/artful
# sudo ln -s gutsy /usr/share/debootstrap/scripts/bionic
# sudo ln -s sid /usr/share/debootstrap/scripts/buster
# build ubuntu:
# sudo DIST=bionic pbuilder create --configfile debian/.pbuilderrc && DIST=bionic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=cosmic pbuilder create --configfile debian/.pbuilderrc && DIST=cosmic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=disco pbuilder create --configfile debian/.pbuilderrc && DIST=disco pdebuild --configfile debian/.pbuilderrc
# sudo DIST=eoan pbuilder create --configfile debian/.pbuilderrc && DIST=eoan pdebuild --configfile debian/.pbuilderrc
# sudo DIST=devel pbuilder create --configfile debian/.pbuilderrc && DIST=devel pdebuild --configfile debian/.pbuilderrc
# build debian:
# sudo DIST=stable pbuilder create --configfile debian/.pbuilderrc && DIST=stable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing pbuilder create --configfile debian/.pbuilderrc && DIST=testing pdebuild --configfile debian/.pbuilderrc
# sudo DIST=unstable pbuilder create --configfile debian/.pbuilderrc && DIST=unstable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental pbuilder create --configfile debian/.pbuilderrc && DIST=experimental pdebuild --configfile debian/.pbuilderrc
# build i386 experimental:
# sudo DIST=trusty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=trusty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=xenial ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=xenial ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=zesty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=zesty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=artful ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=artful ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=bionic ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=bionic ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=stable ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=stable ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=testing ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=experimental ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# test gcc-9
# env DEB_CC=gcc-9 DEB_CXX=g++-9 EXTRAPACKAGES="g++-9 gcc-9" DIST=disco pdebuild --configfile debian/.pbuilderrc
# use only clang:
# env DEB_CC=clang-8 DEB_CXX=clang++-8 EXTRAPACKAGES=clang-8 DIST=disco pdebuild --configfile debian/.pbuilderrc
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES=clang-5.0 DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+asan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DENABLE_TCMALLOC=0 -DENABLE_UNWIND=0 -DCMAKE_BUILD_TYPE=Asan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+tsan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Tsan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# without sse for old systems and some VM:
# env DH_VERBOSE=1 CMAKE_FLAGS="-DHAVE_SSE41=0 -DHAVE_SSE42=0 -DHAVE_POPCNT=0 -DHAVE_SSE2_INTRIN=0 -DSSE2FLAG=' ' -DHAVE_SSE42_INTRIN=0 -DSSE4FLAG=' ' -DHAVE_PCLMULQDQ_INTRIN=0 -DPCLMULFLAG=' '" DIST=artful pdebuild --configfile debian/.pbuilderrc

# Note: on trusty host creating some future dists can fail (debootstrap error).

# Your packages built here: /var/cache/pbuilder/*-*/result

# from https://wiki.debian.org/PbuilderTricks :

# Codenames for Debian suites according to their alias. Update these when
# needed.
UNSTABLE_CODENAME="sid"
TESTING_CODENAME="buster"
STABLE_CODENAME="stretch"
STABLE_BACKPORTS_SUITE="$STABLE_CODENAME-backports"

# List of Debian suites.
DEBIAN_SUITES=($UNSTABLE_CODENAME $TESTING_CODENAME $STABLE_CODENAME $STABLE_BACKPORTS_SUITE
    "experimental" "unstable" "testing" "stable")

# List of Ubuntu suites. Update these when needed.
UBUNTU_SUITES=("eoan" "disco" "cosmic" "bionic" "artful" "zesty" "xenial" "trusty" "devel")

# Set a default distribution if none is used. Note that you can set your own default (i.e. ${DIST:="unstable"}).
HOST_DIST=`lsb_release --short --codename`
: ${DIST:="$HOST_DIST"}

# Optionally change Debian codenames in $DIST to their aliases.
case "$DIST" in
    $UNSTABLE_CODENAME)
        DIST="unstable"
        ;;
    $TESTING_CODENAME)
        DIST="testing"
        ;;
    $STABLE_CODENAME)
        DIST="stable"
        ;;
esac

# Optionally set the architecture to the host architecture if none set. Note
# that you can set your own default (i.e. ${ARCH:="i386"}).
: ${ARCH:="$(dpkg --print-architecture)"}

NAME="$DIST"
if [ -n "${ARCH}" ]; then
    NAME="$NAME-$ARCH"
    DEBOOTSTRAPOPTS=("--arch" "$ARCH" "${DEBOOTSTRAPOPTS[@]}")
fi

BASETGZ=${SET_BASETGZ}
BASETGZ=${BASETGZ:="/var/cache/pbuilder/$NAME-base.tgz"}
DISTRIBUTION="$DIST"
BUILDRESULT=${SET_BUILDRESULT}
BUILDRESULT=${BUILDRESULT:="/var/cache/pbuilder/$NAME/result/"}
APTCACHE="/var/cache/pbuilder/$NAME/aptcache/"
BUILDPLACE="/var/cache/pbuilder/build/"
ALLOWUNTRUSTED=${SET_ALLOWUNTRUSTED:=${ALLOWUNTRUSTED}}

#DEBOOTSTRAPOPTS=( '--variant=buildd' $SET_DEBOOTSTRAPOPTS )


if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
    # Debian configuration
    OSNAME=debian
    MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
    COMPONENTS="main contrib non-free"
    if $(echo "$STABLE_CODENAME stable" | grep -q $DIST); then
        OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $STABLE_BACKPORTS_SUITE $COMPONENTS"
    fi
    # APTKEYRINGS=/usr/share/keyrings/debian-archive-keyring.gpg

    case "$HOST_DIST" in
        "trusty" )
            DEBOOTSTRAPOPTS+=( '--no-check-gpg' )
            ;;
        *)
            DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-archive-keyring.gpg' )
            # DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-keyring.gpg' )
    esac
elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
    # Ubuntu configuration
    OSNAME=ubuntu

    if [[ "$ARCH" == "amd64" || "$ARCH" == "i386" ]]; then
        MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
    else
        MIRRORSITE=${SET_MIRRORSITE="http://ports.ubuntu.com/ubuntu-ports/"}
    fi

    COMPONENTS="main restricted universe multiverse"

    OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-updates main restricted universe multiverse"
    OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-security main restricted universe multiverse"
    OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-proposed main restricted universe multiverse"

    case "$DIST" in
        "trusty" | "xenial" )
            OTHERMIRROR="$OTHERMIRROR | deb http://ppa.launchpad.net/ubuntu-toolchain-r/test/$OSNAME $DIST main"
            ALLOWUNTRUSTED=yes
            ;;
    esac

    # deb http://apt.llvm.org/zesty/ llvm-toolchain-zesty-5.0 main
else
    echo "Unknown distribution: $DIST"
    exit 1
fi

echo "using $NAME $OSNAME $DIST $ARCH $LOGNAME $MIRRORSITE"

case "$DIST" in
    "trusty")
        # ccache broken
        ;;
    *)
        CCACHEDIR=${SET_CCACHEDIR:="/var/cache/pbuilder/ccache"}
        ;;
esac

# old systems with default gcc <= 6
case "$DIST" in
    "trusty" | "xenial" | "stable" )
        export DEB_CC=gcc-7
        export DEB_CXX=g++-7
        ;;
esac

if [ "$ARCH" != arm64 ]; then
    case "$DIST" in
        # TODO: fix llvm-8 and use for "disco" and "eoan"
        "experimental")
            EXTRAPACKAGES+=" liblld-8-dev libclang-8-dev llvm-8-dev liblld-8 "
            export CMAKE_FLAGS="-DLLVM_VERSION=8 $CMAKE_FLAGS"
            ;;
        "eoan" | "disco" | "cosmic" | "testing" | "unstable")
            EXTRAPACKAGES+=" liblld-7-dev libclang-7-dev llvm-7-dev liblld-7 "
            export CMAKE_FLAGS="-DLLVM_VERSION=7 $CMAKE_FLAGS"
            ;;
        "bionic")
            EXTRAPACKAGES+=" liblld-6.0-dev libclang-6.0-dev liblld-6.0 "
            export CMAKE_FLAGS="-DLLVM_VERSION=6 $CMAKE_FLAGS"
            ;;
        "artful" )
            EXTRAPACKAGES+=" liblld-5.0-dev libclang-5.0-dev liblld-5.0 "
            ;;
    esac
else
    export CMAKE_FLAGS="-DENABLE_EMBEDDED_COMPILER=0 $CMAKE_FLAGS"
fi

# Will test symbols
#EXTRAPACKAGES+=" gdb "

# For killall in pbuilder-hooks:
EXTRAPACKAGES+=" psmisc "

[[ $CCACHE_PREFIX == 'distcc' ]] && EXTRAPACKAGES+=" $CCACHE_PREFIX " && USENETWORK=yes && export DISTCC_DIR=/var/cache/pbuilder/distcc

[[ $ARCH == 'i386' ]] && EXTRAPACKAGES+=" libssl-dev "

export DEB_BUILD_OPTIONS=parallel=`nproc`

# Floating bug with permissions:
[ -n "$CCACHEDIR" ] && sudo mkdir -p $CCACHEDIR
[ -n "$CCACHEDIR" ] && sudo chmod -R a+rwx $CCACHEDIR || true
# chown -R $BUILDUSERID:$BUILDUSERID $CCACHEDIR


# Do not create source package inside pbuilder (-b)
# Use current dir to make package (by default should have src archive)
# echo "3.0 (native)" > debian/source/format
# OR
# pdebuild -b --debbuildopts "--source-option=--format=\"3.0 (native)\""
# OR
DEBBUILDOPTS="-b --source-option=--format=\"3.0 (native)\""

HOOKDIR="debian/pbuilder-hooks"

#echo "DEBOOTSTRAPOPTS=${DEBOOTSTRAPOPTS[@]}"
#echo "ALLOWUNTRUSTED=${ALLOWUNTRUSTED} OTHERMIRROR=${OTHERMIRROR}"
#echo "EXTRAPACKAGES=${EXTRAPACKAGES}"
debian/changelog (5 lines, vendored, deleted)

@@ -1,5 +0,0 @@
clickhouse (22.1.1.1) unstable; urgency=low

  * Modified source code

 -- clickhouse-release <clickhouse-release@yandex-team.ru>  Thu, 09 Dec 2021 00:32:58 +0300
debian/changelog.in (5 lines, vendored, deleted)

@@ -1,5 +0,0 @@
clickhouse (@VERSION_STRING@) unstable; urgency=low

  * Modified source code

 -- @AUTHOR@ <@EMAIL@>  @DATE@
debian/clickhouse-client.install (7 lines, vendored, deleted)

@@ -1,7 +0,0 @@
usr/bin/clickhouse-client
usr/bin/clickhouse-local
usr/bin/clickhouse-compressor
usr/bin/clickhouse-benchmark
usr/bin/clickhouse-format
usr/bin/clickhouse-obfuscator
etc/clickhouse-client/config.xml
debian/clickhouse-common-static.install (5 lines, vendored, deleted)

@@ -1,5 +0,0 @@
usr/bin/clickhouse
usr/bin/clickhouse-odbc-bridge
usr/bin/clickhouse-library-bridge
usr/bin/clickhouse-extract-from-config
usr/share/bash-completion/completions
debian/clickhouse-server.cron.d (1 line, vendored, deleted)

@@ -1 +0,0 @@
#*/10 * * * * root ((which service > /dev/null 2>&1 && (service clickhouse-server condstart ||:)) || /etc/init.d/clickhouse-server condstart) > /dev/null 2>&1
debian/clickhouse-server.docs (4 lines, vendored, deleted)

@@ -1,4 +0,0 @@
LICENSE
AUTHORS
README.md
CHANGELOG.md
debian/clickhouse-server.init (227 lines, vendored, deleted)

@@ -1,227 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          clickhouse-server
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Should-Start:      $time $network
# Should-Stop:       $network
# Short-Description: clickhouse-server daemon
### END INIT INFO
#
# NOTES:
# - Should-* -- script can start if the listed facilities are missing, unlike Required-*
#
# For the documentation [1]:
#
# [1]: https://wiki.debian.org/LSBInitScripts

CLICKHOUSE_USER=clickhouse
CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
SHELL=/bin/bash
PROGRAM=clickhouse-server
CLICKHOUSE_GENERIC_PROGRAM=clickhouse
CLICKHOUSE_PROGRAM_ENV=""
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFDIR=/etc/$PROGRAM
CLICKHOUSE_LOGDIR=/var/log/clickhouse-server
CLICKHOUSE_LOGDIR_USER=root
CLICKHOUSE_DATADIR=/var/lib/clickhouse
if [ -d "/var/lock" ]; then
    LOCALSTATEDIR=/var/lock
else
    LOCALSTATEDIR=/run/lock
fi

if [ ! -d "$LOCALSTATEDIR" ]; then
    mkdir -p "$LOCALSTATEDIR"
fi

CLICKHOUSE_BINDIR=/usr/bin
CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
LOCKFILE=$LOCALSTATEDIR/$PROGRAM
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM
CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
# CLICKHOUSE_STOP_TIMEOUT=60 # Disabled by default. Place to /etc/default/clickhouse if you need.

# Some systems lack "flock"
command -v flock >/dev/null && FLOCK=flock

# Override defaults from optional config file
test -f /etc/default/clickhouse && . /etc/default/clickhouse


die()
{
    echo $1 >&2
    exit 1
}


# Check that configuration file is Ok.
check_config()
{
    if [ -x "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG" ]; then
        su -s $SHELL ${CLICKHOUSE_USER} -c "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. You may use forcerestart if you are sure.";
    fi
}


initdb()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


start()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} start --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


stop()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} stop --pid-path "${CLICKHOUSE_PIDDIR}"
}


restart()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} restart --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


forcestop()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} stop --force --pid-path "${CLICKHOUSE_PIDDIR}"
}


service_or_func()
{
    if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
        systemctl $1 $PROGRAM
    else
        $1
    fi
}

forcerestart()
{
    forcestop
    # Should not use 'start' function if systemd active
    service_or_func start
}

use_cron()
{
    # 1. running systemd
    if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
        return 1
    fi
    # 2. disabled by config
    if [ -z "$CLICKHOUSE_CRONFILE" ]; then
        return 2
    fi
    return 0
}
# returns false if cron disabled (with systemd)
enable_cron()
{
    use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE"
}
# returns false if cron disabled (with systemd)
disable_cron()
{
    use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE"
}


is_cron_disabled()
{
    use_cron || return 0

    # Assumes that either no lines are commented or all lines are commented.
    # Also please note, that currently cron file for ClickHouse has only one line (but some time ago there was more).
    grep -q -E '^#' "$CLICKHOUSE_CRONFILE";
}


main()
{
    # See how we were called.
    EXIT_STATUS=0
    case "$1" in
    start)
        service_or_func start && enable_cron
        ;;
    stop)
        disable_cron
        service_or_func stop
        ;;
    restart)
        service_or_func restart && enable_cron
        ;;
    forcestop)
        disable_cron
        forcestop
        ;;
    forcerestart)
        forcerestart && enable_cron
        ;;
    reload)
        service_or_func restart
        ;;
    condstart)
        service_or_func start
        ;;
    condstop)
        service_or_func stop
        ;;
    condrestart)
        service_or_func restart
        ;;
    condreload)
        service_or_func restart
        ;;
    initdb)
        initdb
        ;;
    enable_cron)
        enable_cron
        ;;
    disable_cron)
        disable_cron
        ;;
    *)
        echo "Usage: $0 {start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}"
        exit 2
        ;;
    esac

    exit $EXIT_STATUS
}


status()
{
    ${CLICKHOUSE_GENERIC_PROGRAM} status --pid-path "${CLICKHOUSE_PIDDIR}"
}


# Running commands without need of locking
case "$1" in
status)
    status
    exit 0
    ;;
esac


(
    if $FLOCK -n 9; then
        main "$@"
    else
        echo "Init script is already running" && exit 1
    fi
) 9> $LOCKFILE
debian/clickhouse-server.install (6 lines, vendored, deleted)

@@ -1,6 +0,0 @@
usr/bin/clickhouse-server
usr/bin/clickhouse-copier
usr/bin/clickhouse-report
etc/clickhouse-server/config.xml
etc/clickhouse-server/users.xml
etc/systemd/system/clickhouse-server.service
debian/clickhouse-server.postinst (47 lines, vendored, deleted)

@@ -1,47 +0,0 @@
#!/bin/sh
set -e
# set -x

PROGRAM=clickhouse-server
CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse}
CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}}
# Please note that we don't support paths with whitespaces. This is rather ignorant.
CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR:=/etc/clickhouse-server}
CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse}
CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server}
CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin}
CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse}
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM

[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule
[ -f /etc/default/clickhouse ] && . /etc/default/clickhouse

if [ ! -f "/etc/debian_version" ]; then
    not_deb_os=1
fi

if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then

    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"

    if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
        # if old rc.d service present - remove it
        if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
            /usr/sbin/update-rc.d clickhouse-server remove
        fi

        /bin/systemctl daemon-reload
        /bin/systemctl enable clickhouse-server
    else
        # If you downgrading to version older than 1.1.54336 run: systemctl disable clickhouse-server
        if [ -x "/etc/init.d/clickhouse-server" ]; then
            if [ -x "/usr/sbin/update-rc.d" ]; then
                /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
            else
                echo # Other OS
            fi
        fi
    fi
fi
debian/clickhouse-server.service (27 lines, vendored, deleted)

@@ -1,27 +0,0 @@
[Unit]
Description=ClickHouse Server (analytic DBMS for big data)
Requires=network-online.target
# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure
# that the time was adjusted already, if you use systemd-timesyncd you are
# safe, but if you use ntp or some other daemon, you should configure it
# additionaly.
After=time-sync.target network-online.target
Wants=time-sync.target

[Service]
Type=simple
User=clickhouse
Group=clickhouse
Restart=always
RestartSec=30
RuntimeDirectory=clickhouse-server
ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
# Minus means that this file is optional.
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE

[Install]
# ClickHouse should not start from the rescue shell (rescue.target).
WantedBy=multi-user.target
debian/compat (1 line, vendored, deleted)

@@ -1 +0,0 @@
10
debian/control (58 lines, vendored, deleted)

@@ -1,58 +0,0 @@
Source: clickhouse
Section: database
Priority: optional
Maintainer: Alexey Milovidov <milovidov@clickhouse.com>
Build-Depends: debhelper (>= 9),
    cmake | cmake3,
    ninja-build,
    clang-13,
    llvm-13,
    lld-13,
    libc6-dev,
    tzdata
Standards-Version: 3.9.8

Package: clickhouse-client
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
Replaces: clickhouse-compressor
Conflicts: clickhouse-compressor
Description: Client binary for ClickHouse
 ClickHouse is a column-oriented database management system
 that allows generating analytical data reports in real time.
 .
 This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark

Package: clickhouse-common-static
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Suggests: clickhouse-common-static-dbg
Replaces: clickhouse-common, clickhouse-server-base
Provides: clickhouse-common, clickhouse-server-base
Description: Common files for ClickHouse
 ClickHouse is a column-oriented database management system
 that allows generating analytical data reports in real time.
 .
 This package provides common files for both clickhouse server and client

Package: clickhouse-server
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser
Recommends: libcap2-bin
Replaces: clickhouse-server-common, clickhouse-server-base
Provides: clickhouse-server-common
Description: Server binary for ClickHouse
 ClickHouse is a column-oriented database management system
 that allows generating analytical data reports in real time.
 .
 This package provides clickhouse common configuration files

Package: clickhouse-common-static-dbg
Architecture: any
Section: debug
Priority: optional
Depends: ${misc:Depends}
Replaces: clickhouse-common-dbg
Conflicts: clickhouse-common-dbg
Description: debugging symbols for clickhouse-common-static
 This package contains the debugging symbols for clickhouse-common.
debian/rules (132 lines, vendored, deleted)

@@ -1,132 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1

# -pie only for static mode
export DEB_BUILD_MAINT_OPTIONS=hardening=-all

# because copy_headers.sh have hardcoded path to build/include_directories.txt
BUILDDIR = obj-$(DEB_HOST_GNU_TYPE)
CURDIR = $(shell pwd)
DESTDIR = $(CURDIR)/debian/tmp

DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)

ifeq ($(CCACHE_PREFIX),distcc)
    THREADS_COUNT=$(shell distcc -j)
endif
ifeq ($(THREADS_COUNT),)
    THREADS_COUNT=$(shell nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu || echo 4)
endif
DEB_BUILD_OPTIONS+=parallel=$(THREADS_COUNT)

ifndef ENABLE_TESTS
    CMAKE_FLAGS += -DENABLE_TESTS=0
else
# To export binaries and from deb build we do not strip them. No need to run tests in deb build as we run them in CI
    DEB_BUILD_OPTIONS+= nocheck
    DEB_BUILD_OPTIONS+= nostrip
endif

ifndef MAKE_TARGET
    MAKE_TARGET = clickhouse-bundle
endif

CMAKE_FLAGS += -DENABLE_UTILS=0

DEB_CC ?= $(shell which gcc-11 gcc-10 gcc-9 gcc | head -n1)
DEB_CXX ?= $(shell which g++-11 g++-10 g++-9 g++ | head -n1)

ifdef DEB_CXX
    DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
    DEB_HOST_GNU_TYPE := $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
ifeq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
    CC := $(DEB_CC)
    CXX := $(DEB_CXX)
else ifeq (clang,$(findstring clang,$(DEB_CXX)))
# If we crosscompile with clang, it knows what to do
    CC := $(DEB_CC)
    CXX := $(DEB_CXX)
else
    CC := $(DEB_HOST_GNU_TYPE)-$(DEB_CC)
    CXX := $(DEB_HOST_GNU_TYPE)-$(DEB_CXX)
endif
endif

ifdef CXX
    CMAKE_FLAGS += -DCMAKE_CXX_COMPILER=`which $(CXX)`
endif
ifdef CC
    CMAKE_FLAGS += -DCMAKE_C_COMPILER=`which $(CC)`
endif

ifndef DISABLE_NINJA
    NINJA=$(shell which ninja)
ifneq ($(NINJA),)
    CMAKE_FLAGS += -GNinja
    export MAKE=$(NINJA) $(NINJA_FLAGS)
endif
endif

ifndef DH_VERBOSE
    CMAKE_FLAGS += -DCMAKE_VERBOSE_MAKEFILE=0
endif

# Useful for bulding on low memory systems
ifndef DISABLE_PARALLEL
    DH_FLAGS += --parallel
else
    THREADS_COUNT = 1
endif

%:
	dh $@ $(DH_FLAGS) --buildsystem=cmake

override_dh_auto_configure:
	dh_auto_configure -- $(CMAKE_FLAGS)

override_dh_auto_build:
	# Fix for ninja. Do not add -O.
	$(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)

override_dh_auto_test:
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
	cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
endif

# Disable config.guess and config.sub update
override_dh_update_autotools_config:

override_dh_clean:
	rm -rf debian/copyright debian/clickhouse-client.docs debian/clickhouse-common-static.docs
	dh_clean # -X contrib

override_dh_strip:
	#https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
	dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
endif

override_dh_install:
	# Making docs
	cp LICENSE debian/copyright

	ln -sf clickhouse-server.docs debian/clickhouse-client.docs
	ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs

	# systemd compatibility
	mkdir -p $(DESTDIR)/etc/systemd/system/
	cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/

	dh_install --list-missing --sourcedir=$(DESTDIR)

override_dh_auto_install:
	env DESTDIR=$(DESTDIR) $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) install

override_dh_shlibdeps:
	true # We depend only on libc and dh_shlibdeps gives us wrong (too strict) dependency.

override_dh_builddeb:
	dh_builddeb -- -Z gzip # Older systems don't have "xz", so use "gzip" instead.
debian/source/format (1 line, vendored, deleted)

@@ -1 +0,0 @@
3.0 (quilt)
debian/source/options (9 lines, vendored, deleted)

@@ -1,9 +0,0 @@
tar-ignore
tar-ignore="build_*/*"
tar-ignore="workspace/*"
tar-ignore="contrib/poco/openssl/*"
tar-ignore="contrib/poco/gradle/*"
tar-ignore="contrib/poco/Data/SQLite/*"
tar-ignore="contrib/poco/PDF/*"
compression-level=3
compression=gzip
debian/watch (6 lines, vendored, deleted)

@@ -1,6 +0,0 @@
version=4

opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
    https://github.com/ClickHouse/ClickHouse/tags \
    (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate
@@ -1,43 +1,23 @@
# docker build -t clickhouse/docs-builder .
-FROM ubuntu:20.04
+# nodejs 17 prefers ipv6 and is broken in our environment
+FROM node:16.14.2-alpine3.15

-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
+RUN apk add --no-cache git openssh bash

-ENV LANG=C.UTF-8
+# TODO: clean before merge!
+ARG DOCS_BRANCH=main

-RUN apt-get update \
-    && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
-        wget \
-        bash \
-        python \
-        curl \
-        python3-requests \
-        sudo \
-        git \
-        openssl \
-        python3-pip \
-        software-properties-common \
-        language-pack-zh* \
-        chinese* \
-        fonts-arphic-ukai \
-        fonts-arphic-uming \
-        fonts-ipafont-mincho \
-        fonts-ipafont-gothic \
-        fonts-unfonts-core \
-        xvfb \
-        nodejs \
-        npm \
-        openjdk-11-jdk \
-        ssh-client \
-    && pip --no-cache-dir install scipy \
-    && apt-get autoremove --yes \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+RUN git clone https://github.com/ClickHouse/clickhouse-docs.git \
+    --depth=1 --branch=${DOCS_BRANCH} /opt/clickhouse-docs

-RUN wget 'https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb'
+WORKDIR /opt/clickhouse-docs

-RUN npm i -g purify-css
+RUN yarn config set registry https://registry.npmjs.org \
+    && yarn install \
+    && yarn cache clean

-RUN pip3 install --ignore-installed --upgrade setuptools pip virtualenv
+COPY run.sh /run.sh
+
+ENTRYPOINT ["/run.sh"]
+
+CMD ["yarn", "build"]
docker/docs/builder/run.sh
Executable file
29
docker/docs/builder/run.sh
Executable file
@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
if [ "$GIT_DOCS_BRANCH" ]; then
|
||||
git fetch origin --depth=1 "$GIT_DOCS_BRANCH:$GIT_DOCS_BRANCH"
|
||||
git checkout "$GIT_DOCS_BRANCH"
|
||||
else
|
||||
# Update docs repo
|
||||
git pull
|
||||
fi
|
||||
|
||||
# The repo is usually mounted to /ClickHouse
|
||||
|
||||
for lang in en ru zh
|
||||
do
|
||||
if [ -d "/ClickHouse/docs/${lang}" ]; then
|
||||
cp -rf "/ClickHouse/docs/${lang}" "/opt/clickhouse-docs/docs/"
|
||||
fi
|
||||
done
|
||||
|
||||
# Force build error on wrong symlinks
|
||||
sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js
|
||||
|
||||
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
||||
exec yarn build "$@"
|
||||
fi
|
||||
|
||||
exec "$@"
|
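A sketch of how the new entrypoint might be invoked; the image tag and bind-mount path are illustrative. Per the final `exec` lines above, arguments starting with `--` are forwarded to the default `yarn build`, while anything else replaces the command entirely:

```bash
# Build the docs with the repository bind-mounted where run.sh expects it:
docker run --rm -e GIT_DOCS_BRANCH=main \
    -v "$PWD:/ClickHouse" clickhouse/docs-builder
# Drop into a shell instead of building:
docker run --rm -v "$PWD:/ClickHouse" clickhouse/docs-builder bash
```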
@@ -1,10 +0,0 @@
# docker build -t clickhouse/docs-check .
ARG FROM_TAG=latest
FROM clickhouse/docs-builder:$FROM_TAG

COPY run.sh /

ENV REPO_PATH=/repo_path
ENV OUTPUT_PATH=/output_path

CMD ["/bin/bash", "/run.sh"]
@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

cd $REPO_PATH/docs/tools
rm -rf venv
mkdir venv
virtualenv -p $(which python3) venv
source venv/bin/activate
python3 -m pip install --ignore-installed -r requirements.txt
./build.py --skip-git-log 2>&1 | tee $OUTPUT_PATH/output.log
@@ -1,11 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/docs-release .
ARG FROM_TAG=latest
FROM clickhouse/docs-builder:$FROM_TAG

COPY run.sh /

ENV REPO_PATH=/repo_path
ENV OUTPUT_PATH=/output_path

CMD ["/bin/bash", "/run.sh"]
@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

cd $REPO_PATH/docs/tools
mkdir venv
virtualenv -p $(which python3) venv
source venv/bin/activate
python3 -m pip install --ignore-installed -r requirements.txt
mkdir -p ~/.ssh && ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
./release.sh 2>&1 | tee tee $OUTPUT_PATH/output.log
@@ -149,16 +149,6 @@
    "docker/docs/builder": {
        "name": "clickhouse/docs-builder",
        "dependent": [
-            "docker/docs/check",
-            "docker/docs/release"
        ]
    },
-    "docker/docs/check": {
-        "name": "clickhouse/docs-check",
-        "dependent": []
-    },
-    "docker/docs/release": {
-        "name": "clickhouse/docs-release",
-        "dependent": []
-    }
}
@@ -362,19 +362,6 @@ function get_profiles
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
}

-function build_log_column_definitions
-{
-# FIXME This loop builds column definitons from TSVWithNamesAndTypes in an
-# absolutely atrocious way. This should be done by the file() function itself.
-for x in {right,left}-{addresses,{query,query-thread,trace,{async-,}metric}-log}.tsv
-do
-    paste -d' ' \
-        <(sed -n '1{s/\t/\n/g;p;q}' "$x" | sed 's/\(^.*$\)/"\1"/') \
-        <(sed -n '2{s/\t/\n/g;p;q}' "$x" ) \
-        | tr '\n' ', ' | sed 's/,$//' > "$x.columns"
-done
-}
-
# Build and analyze randomization distribution for all queries.
function analyze_queries
{
@@ -382,8 +369,6 @@ rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.t
rm -rf analyze ||:
mkdir analyze analyze/tmp ||:

-build_log_column_definitions
-
# Split the raw test output into files suitable for analysis.
# To debug calculations only for a particular test, substitute a suitable
# wildcard here, e.g. `for test_file in modulo-raw.tsv`.
@@ -422,12 +407,10 @@ create table partial_query_times engine File(TSVWithNamesAndTypes,

-- Process queries that were run normally, on both servers.
create view left_query_log as select *
-    from file('left-query-log.tsv', TSVWithNamesAndTypes,
-        '$(cat "left-query-log.tsv.columns")');
+    from file('left-query-log.tsv', TSVWithNamesAndTypes);

create view right_query_log as select *
-    from file('right-query-log.tsv', TSVWithNamesAndTypes,
-        '$(cat "right-query-log.tsv.columns")');
+    from file('right-query-log.tsv', TSVWithNamesAndTypes);

create view query_logs as
    select 0 version, query_id, ProfileEvents,
@@ -645,8 +628,6 @@ mkdir report report/tmp ||:

rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:

-build_log_column_definitions
-
cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:

@@ -1028,8 +1009,7 @@ create table unstable_query_runs engine File(TSVWithNamesAndTypes,
;

create view query_log as select *
-    from file('$version-query-log.tsv', TSVWithNamesAndTypes,
-        '$(cat "$version-query-log.tsv.columns")');
+    from file('$version-query-log.tsv', TSVWithNamesAndTypes);

create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
    'unstable-run-metrics.$version.rep') as
@@ -1057,8 +1037,7 @@ create table unstable_run_metrics_2 engine File(TSVWithNamesAndTypes,
    array join v, n;

create view trace_log as select *
-    from file('$version-trace-log.tsv', TSVWithNamesAndTypes,
-        '$(cat "$version-trace-log.tsv.columns")');
+    from file('$version-trace-log.tsv', TSVWithNamesAndTypes);

create view addresses_src as select addr,
    -- Some functions change name between builds, e.g. '__clone' or 'clone' or
@@ -1067,8 +1046,7 @@ create view addresses_src as select addr,
    [name, 'clone.S (filtered by script)', 'pthread_cond_timedwait (filtered by script)']
    -- this line is a subscript operator of the above array
    [1 + multiSearchFirstIndex(name, ['clone.S', 'pthread_cond_timedwait'])] name
-    from file('$version-addresses.tsv', TSVWithNamesAndTypes,
-        '$(cat "$version-addresses.tsv.columns")');
+    from file('$version-addresses.tsv', TSVWithNamesAndTypes);

create table addresses_join_$version engine Join(any, left, address) as
    select addr address, name from addresses_src;
@@ -1195,15 +1173,12 @@ done

function report_metrics
{
-build_log_column_definitions
-
rm -rf metrics ||:
mkdir metrics

clickhouse-local --query "
create view right_async_metric_log as
-    select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes,
-        '$(cat right-async-metric-log.tsv.columns)')
+    select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
;

-- Use the right log as time reference because it may have higher precision.
@@ -1211,8 +1186,7 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
with (select min(event_time) from right_async_metric_log) as min_time
select metric, r.event_time - min_time event_time, l.value as left, r.value as right
from right_async_metric_log r
-asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
-    '$(cat left-async-metric-log.tsv.columns)') l
+asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes) l
on l.metric = r.metric and r.event_time <= l.event_time
order by metric, event_time
;
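All of these `file()` calls drop the explicit column list built from the `.columns` files, relying instead on reading names and types straight from the TSVWithNamesAndTypes header. A standalone sketch of that behaviour (the file name and data are illustrative):

```bash
# TSVWithNamesAndTypes carries a name row and a type row, so no schema is needed.
printf 'id\tname\nUInt64\tString\n1\talice\n2\tbob\n' > sample.tsv
clickhouse-local --query \
    "select name, id + 1 from file('sample.tsv', TSVWithNamesAndTypes)"
```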
@@ -15,11 +15,11 @@ fi
# current curl version options.
function curl_with_retry
{
-    for _ in 1 2 3 4; do
+    for _ in 1 2 3 4 5 6 7 8 9 10; do
        if curl --fail --head "$1";then
            return 0
        else
-            sleep 0.5
+            sleep 1
        fi
    done
    return 1
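The retry budget grows from four attempts with 0.5 s pauses (at most ~2 s of sleeping) to ten attempts with 1 s pauses (at most ~10 s of sleeping before giving up). A standalone copy of the updated helper for local testing; the URL is illustrative:

```bash
function curl_with_retry
{
    for _ in 1 2 3 4 5 6 7 8 9 10; do
        if curl --fail --head "$1"; then
            return 0
        else
            sleep 1
        fi
    done
    return 1
}
curl_with_retry "https://example.com/artifact.tgz" || echo "unreachable after 10 tries"
```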
@@ -218,12 +218,12 @@ zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" /test_
rm -f /test_output/tmp

# OOM
-zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
+zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
    && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
-zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /test_output/logical_errors.txt \
+zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
    && echo -e 'Logical error thrown (see clickhouse-server.log or logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv

@@ -231,12 +231,12 @@ zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-serve
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

# Crash
-zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
+zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
    && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
-zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /test_output/fatal_messages.txt \
+zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
    && echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

@@ -375,14 +375,6 @@ else
    echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
fi

-# Put logs into /test_output/
-for log_file in /var/log/clickhouse-server/clickhouse-server.log*
-do
-    pigz < "${log_file}" > /test_output/"$(basename ${log_file})".gz
-    # FIXME: remove once only github actions will be left
-    rm "${log_file}"
-done
-
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
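The glob change narrows what these checks scan: `clickhouse-server.log*` also matches rotated archives such as clickhouse-server.log.1.gz, while `clickhouse-server*.log` matches only files ending in .log, including auxiliary logs such as a clickhouse-server.backward.log (a name used here purely for illustration). A quick demonstration:

```bash
cd "$(mktemp -d)"
touch clickhouse-server.log clickhouse-server.backward.log clickhouse-server.log.1.gz
ls clickhouse-server.log*    # clickhouse-server.log  clickhouse-server.log.1.gz
ls clickhouse-server*.log    # clickhouse-server.backward.log  clickhouse-server.log
```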
@@ -1,9 +0,0 @@
---
toc_priority: 1
toc_title: Cloud
---

# ClickHouse Cloud Service {#clickhouse-cloud-service}

!!! info "Info"
    Detailed public description for ClickHouse cloud services is not ready yet, please [contact us](https://clickhouse.com/company/#contact) to learn more.
@@ -1,13 +0,0 @@
---
toc_folder_title: Commercial
toc_priority: 70
toc_title: Introduction
---

# ClickHouse Commercial Services {#clickhouse-commercial-services}

Service categories:

- [Cloud](../commercial/cloud.md)
- [Support](../commercial/support.md)
@@ -1,9 +0,0 @@
---
toc_priority: 3
toc_title: Support
---

# ClickHouse Commercial Support Service {#clickhouse-commercial-support-service}

!!! info "Info"
    Detailed public description for ClickHouse support services is not ready yet, please [contact us](https://clickhouse.com/company/#contact) to learn more.
docs/en/development/_category_.yml (8 lines, new file)

@@ -0,0 +1,8 @@
position: 101
label: 'Building ClickHouse'
collapsible: true
collapsed: true
link:
  type: generated-index
  title: Building ClickHouse
  slug: /en/development
@@ -1,3 +1,9 @@
+---
+sidebar_label: Adding Test Queries
+sidebar_position: 63
+description: Instructions on how to add a test case to ClickHouse continuous integration
+---
+
# How to add test queries to ClickHouse CI

ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases.
@ -1,11 +1,12 @@
---
toc_priority: 62
toc_title: Architecture Overview
sidebar_label: Architecture Overview
sidebar_position: 62
---

# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
# Overview of ClickHouse Architecture

ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during the execution of queries, data is processed by arrays (vectors or chunks of columns). Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution” and it helps lower the cost of actual data processing.
ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during the execution of queries, data is processed by arrays (vectors or chunks of columns).
Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution” and it helps lower the cost of actual data processing.

> This idea is nothing new. It dates back to the `APL` (A programming language, 1957) and its descendants: `A +` (APL dialect), `J` (1990), `K` (1993), and `Q` (programming language from Kx Systems, 2003). Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `VectorWise` system (also known as Actian Vector Analytic Database by Actian Corporation).

@ -154,8 +155,9 @@ The server initializes the `Context` class with the necessary environment for qu

We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we do not want to maintain it eternally, and we are removing support for old versions after about one year.

!!! note "Note"
    For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.
:::note
For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.
:::

## Distributed Query Execution {#distributed-query-execution}

@ -193,7 +195,8 @@ Replication is physical: only compressed parts are transferred between nodes, no

Besides, each replica stores its state in ZooKeeper as the set of parts and its checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets it.

!!! note "Note"
    The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.
:::note
The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.
:::

{## [Original article](https://clickhouse.com/docs/en/development/architecture/) ##}
[Original article](https://clickhouse.com/docs/en/development/architecture/)

@ -1,12 +1,13 @@
---
toc_priority: 72
toc_title: Source Code Browser
sidebar_label: Source Code Browser
sidebar_position: 72
description: Various ways to browse and edit the source code
---

# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
# Browse ClickHouse Source Code

You can use **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.

Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.

If you’re interested in which IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favourite IDE. Vim and Emacs also count.
If you’re interested in which IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favorite IDE. Vim and Emacs also count.

@ -1,11 +1,12 @@
---
toc_priority: 67
toc_title: Build on Linux for AARCH64 (ARM64)
sidebar_position: 67
sidebar_label: Build on Linux for AARCH64 (ARM64)
---

# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture

This is for the case when you have a Linux machine and want to use it to build the `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.
This is for the case when you have a Linux machine and want to use it to build the `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture.
This is intended for continuous integration checks that run on Linux servers.

The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.

@ -1,11 +1,12 @@
---
toc_priority: 66
toc_title: Build on Linux for Mac OS X
sidebar_position: 66
sidebar_label: Build on Linux for Mac OS X
---

# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}
# How to Build ClickHouse on Linux for Mac OS X

This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md).
This is for the case when you have a Linux machine and want to use it to build `clickhouse` binary that will run on OS X.
This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md).

The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.

@ -1,9 +1,9 @@
---
toc_priority: 68
toc_title: Build on Linux for RISC-V 64
sidebar_position: 68
sidebar_label: Build on Linux for RISC-V 64
---

# How to Build ClickHouse on Linux for RISC-V 64 Architecture {#how-to-build-clickhouse-on-linux-for-risc-v-64-architecture}
# How to Build ClickHouse on Linux for RISC-V 64 Architecture

As of this writing (11.11.2021), building for RISC-V is considered highly experimental. Not all features can be enabled.

@ -1,16 +1,21 @@
---
toc_priority: 65
toc_title: Build on Mac OS X
sidebar_position: 65
sidebar_label: Build on Mac OS X
description: How to build ClickHouse on Mac OS X
---

# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
# How to Build ClickHouse on Mac OS X

!!! info "You don't have to build ClickHouse yourself"
    You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start).
    Follow `macOS (Intel)` or `macOS (Apple silicon)` installation instructions.
:::info You don't have to build ClickHouse yourself!
You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions.
:::

Build should work on x86_64 (Intel) and arm64 (Apple silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
It is always recommended to use vanilla `clang` compiler. It is possible to use XCode's `apple-clang` or `gcc` but it's strongly discouraged.
It is always recommended to use vanilla `clang` compiler.

:::note
It is possible to use XCode's `apple-clang` or `gcc`, but it's strongly discouraged.
:::

## Install Homebrew {#install-homebrew}

@ -89,8 +94,9 @@ cmake --build . --config RelWithDebInfo

If you intend to run `clickhouse-server`, make sure to increase the system’s maxfiles variable.

!!! info "Note"
    You’ll need to use sudo.
:::note
You’ll need to use sudo.
:::

To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:

@ -1,9 +1,10 @@
---
toc_priority: 64
toc_title: Build on Linux
sidebar_position: 64
sidebar_label: Build on Linux
description: How to build ClickHouse on Linux
---

# How to Build ClickHouse on Linux {#how-to-build-clickhouse-for-development}
# How to Build ClickHouse on Linux

Supported platforms:

@ -1,6 +1,7 @@
---
toc_priority: 62
toc_title: Continuous Integration Checks
sidebar_position: 62
sidebar_label: Continuous Integration Checks
description: When you submit a pull request, some automated checks are run for your code by the ClickHouse continuous integration (CI) system
---

# Continuous Integration Checks
@ -53,7 +54,7 @@ the documentation is wrong. Go to the check report and look for `ERROR` and `WAR
Check that the description of your pull request conforms to the template
[PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md).
You have to specify a changelog category for your change (e.g., Bug Fix), and
write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/index.md)
write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/)

## Push To Dockerhub
@ -71,8 +72,6 @@ This check means that the CI system started to process the pull request. When it
Performs some simple regex-based checks of code style, using the [`utils/check-style/check-style`](https://github.com/ClickHouse/ClickHouse/blob/master/utils/check-style/check-style) binary (note that it can be run locally).
If it fails, fix the style errors following the [code style guide](style.md).

Python code is checked with [black](https://github.com/psf/black/).

### Report Details
- [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check.html)
- `output.txt` contains the check resulting errors (invalid tabulation etc), blank page means no errors. [Successful result example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check/output.txt).
@ -152,7 +151,7 @@ checks page](../development/build.md#you-dont-have-to-build-clickhouse), or buil

## Functional Stateful Tests
Runs [stateful functional tests](tests.md#functional-tests). Treat them in the same way as the functional stateless tests. The difference is that they require `hits` and `visits` tables from the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) to run.
Runs [stateful functional tests](tests.md#functional-tests). Treat them in the same way as the functional stateless tests. The difference is that they require `hits` and `visits` tables from the [clickstream dataset](../getting-started/example-datasets/metrica.md) to run.

## Integration Tests

@ -1,9 +1,10 @@
---
toc_priority: 71
toc_title: Third-Party Libraries Used
sidebar_position: 71
sidebar_label: Third-Party Libraries
description: A list of third-party libraries used
---

# Third-Party Libraries Used {#third-party-libraries-used}
# Third-Party Libraries Used

The list of third-party libraries:

@ -1,11 +1,12 @@
---
toc_priority: 61
toc_title: For Beginners
sidebar_position: 61
sidebar_label: Getting Started
description: Prerequisites and an overview of how to build ClickHouse
---

# The Beginner ClickHouse Developer Instruction {#the-beginner-clickhouse-developer-instruction}
# Getting Started Guide for Building ClickHouse

Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.
The building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.

If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.

@ -229,25 +230,6 @@ As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate

Just in case, it is worth mentioning that CLion creates the `build` path on its own, selects `debug` for the build type on its own, uses the version of CMake that is defined in CLion rather than the one installed by you, and finally uses `make` instead of `ninja` to run build tasks. This is normal behaviour; just keep it in mind to avoid confusion.

## Debugging

Many graphical IDEs come with an integrated debugger, but you can also use a standalone debugger.

### GDB

### LLDB

# tell LLDB where to find the source code
settings set target.source-map /path/to/build/dir /path/to/source/dir

# configure LLDB to display code before/after currently executing line
settings set stop-line-count-before 10
settings set stop-line-count-after 10

target create ./clickhouse-client
# <set breakpoints here>
process launch -- --query="SELECT * FROM TAB"

## Writing Code {#writing-code}

The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/

@ -1,8 +1,6 @@
---
toc_folder_title: Development
toc_hidden: true
toc_priority: 58
toc_title: hidden
sidebar_label: Development
sidebar_position: 58
---

# ClickHouse Development {#clickhouse-development}

@ -1,9 +1,10 @@
---
toc_priority: 69
toc_title: C++ Guide
sidebar_position: 69
sidebar_label: C++ Guide
description: A list of recommendations regarding coding style, naming convention, formatting and more
---

# How to Write C++ Code {#how-to-write-c-code}
# How to Write C++ Code

## General Recommendations {#general-recommendations}

@ -1,11 +1,12 @@
---
toc_priority: 70
toc_title: Testing
sidebar_position: 70
sidebar_label: Testing
description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
---

# ClickHouse Testing {#clickhouse-testing}
# ClickHouse Testing

## Functional Tests {#functional-tests}
## Functional Tests

Functional tests are the simplest and most convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.

8
docs/en/engines/_category_.yml
Normal file
@ -0,0 +1,8 @@
position: 30
label: 'Database & Table Engines'
collapsible: true
collapsed: true
link:
  type: generated-index
  title: Database & Table Engines
  slug: /en/engines

@ -1,9 +1,9 @@
---
toc_priority: 32
toc_title: Atomic
sidebar_label: Atomic
sidebar_position: 10
---

# Atomic {#atomic}
# Atomic

It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#rename-table) queries and atomic [EXCHANGE TABLES](#exchange-tables) queries. `Atomic` database engine is used by default.
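
For example, an atomic `EXCHANGE TABLES` swaps two tables in a single step; a minimal sketch (the table names are illustrative):

```sql
-- Swap the two tables atomically: there is no moment when either name is missing.
EXCHANGE TABLES new_data AND old_data;
```
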
@ -18,14 +18,21 @@ CREATE DATABASE test [ENGINE = Atomic];
### Table UUID {#table-uuid}

All tables in database `Atomic` have a persistent [UUID](../../sql-reference/data-types/uuid.md) and store data in the directory `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, where `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` is the UUID of the table.
Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended). To display the `SHOW CREATE` query with the UUID you can use setting [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil). For example:
Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended).

For example:

```sql
CREATE TABLE name UUID '28f1c61c-2970-457a-bffe-454156ddcfef' (n UInt64) ENGINE = ...;
```

:::note
You can use the [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil) setting to display the UUID with the `SHOW CREATE` query.
:::

### RENAME TABLE {#rename-table}

[RENAME](../../sql-reference/statements/rename.md) queries are performed without changing UUID and moving table data. These queries do not wait for the completion of queries using the table and are executed instantly.
[RENAME](../../sql-reference/statements/rename.md) queries are performed without changing the UUID or moving table data. These queries do not wait for the completion of queries using the table and are executed instantly.
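
A minimal sketch of such a rename (database and table names are illustrative):

```sql
-- Metadata-only operation: the table keeps its UUID and its data directory.
RENAME TABLE my_db.events TO my_db.events_v2;
```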

### DROP/DETACH TABLE {#drop-detach-table}

@ -6,11 +6,11 @@ toc_title: Introduction

# Database Engines {#database-engines}

Database engines allow you to work with tables.
Database engines allow you to work with tables. By default, ClickHouse uses the [Atomic](../../engines/database-engines/atomic.md) database engine, which provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md).

By default, ClickHouse uses database engine [Atomic](../../engines/database-engines/atomic.md). It provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md).
Here is a complete list of available database engines. Follow the links for more details:

You can also use the following database engines:
- [Atomic](../../engines/database-engines/atomic.md)

- [MySQL](../../engines/database-engines/mysql.md)

@ -18,8 +18,6 @@ You can also use the following database engines:

- [Lazy](../../engines/database-engines/lazy.md)

- [Atomic](../../engines/database-engines/atomic.md)

- [PostgreSQL](../../engines/database-engines/postgresql.md)

- [Replicated](../../engines/database-engines/replicated.md)

@ -1,6 +1,6 @@
---
toc_priority: 31
toc_title: Lazy
sidebar_label: Lazy
sidebar_position: 20
---

# Lazy {#lazy}

@ -1,16 +1,15 @@
---
toc_priority: 29
toc_title: MaterializedMySQL
sidebar_label: MaterializedMySQL
sidebar_position: 70
---

# [experimental] MaterializedMySQL {#materialized-mysql}
# [experimental] MaterializedMySQL

!!! warning "Warning"
    This is an experimental feature that should not be used in production.
:::warning
This is an experimental feature that should not be used in production.
:::

Creates ClickHouse database with all the tables existing in MySQL, and all the data in those tables.

ClickHouse server works as MySQL replica. It reads binlog and performs DDL and DML queries.
Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables. The ClickHouse server works as MySQL replica. It reads `binlog` and performs DDL and DML queries.

## Creating a Database {#creating-a-database}

@ -31,8 +30,6 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'passwo

- `max_rows_in_buffer` — Maximum number of rows that data is allowed to cache in memory (for a single table; the cached data cannot be queried). When this number is exceeded, the data will be materialized. Default: `65 505`.
- `max_bytes_in_buffer` — Maximum number of bytes that data is allowed to cache in memory (for a single table; the cached data cannot be queried). When this number is exceeded, the data will be materialized. Default: `1 048 576`.
- `max_rows_in_buffers` — Maximum number of rows that data is allowed to cache in memory (for the database; the cached data cannot be queried). When this number is exceeded, the data will be materialized. Default: `65 505`.
- `max_bytes_in_buffers` — Maximum number of bytes that data is allowed to cache in memory (for the database; the cached data cannot be queried). When this number is exceeded, the data will be materialized. Default: `1 048 576`.
- `max_flush_data_time` — Maximum number of milliseconds that data is allowed to cache in memory (for the database; the cached data cannot be queried). When this time is exceeded, the data will be materialized. Default: `1000`.
- `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). Negative value disables retry. Default: `1000`.
- `allows_query_when_mysql_lost` — Allows querying a materialized table when MySQL is lost. Default: `0` (`false`).
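
These settings go into the `SETTINGS` clause of the `CREATE DATABASE` statement; a minimal sketch (the host, credentials, and chosen values are illustrative):

```sql
CREATE DATABASE mysql_mirror
ENGINE = MaterializedMySQL('mysql-host:3306', 'mydb', 'user', 'password')
SETTINGS
    max_rows_in_buffers = 65505,       -- flush the per-database buffer after this many rows
    allows_query_when_mysql_lost = 1;  -- keep serving queries if the MySQL source goes away
```
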
@ -52,8 +49,9 @@ For the correct work of `MaterializedMySQL`, there are few mandatory `MySQL`-sid
- `default_authentication_plugin = mysql_native_password` since `MaterializedMySQL` can only authorize with this method.
- `gtid_mode = on` since GTID based logging is mandatory for providing correct `MaterializedMySQL` replication.

!!! attention "Attention"
    While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = on`.
:::note
While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = on`.
:::

## Virtual Columns {#virtual-columns}

@ -76,7 +74,7 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](
| FLOAT | [Float32](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) |
| DATE, NEWDATE | [Date32](../../sql-reference/data-types/date32.md) |
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| YEAR | [UInt16](../../sql-reference/data-types/int-uint.md) |
@ -108,7 +106,7 @@ Apart of the data types limitations there are few restrictions comparing to `MyS

### DDL Queries {#ddl-queries}

MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.
MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.

### Data Replication {#data-replication}

@ -220,13 +218,14 @@ extra care needs to be taken.

You may specify overrides for tables that do not exist yet.

!!! warning "Warning"
    It is easy to break replication with table overrides if not used with care. For example:

    * If an ALIAS column is added with a table override, and a column with the same name is later added to the source
      MySQL table, the converted ALTER TABLE query in ClickHouse will fail and replication stops.
    * It is currently possible to add overrides that reference nullable columns where not-nullable are required, such as in
      `ORDER BY` or `PARTITION BY`. This will cause CREATE TABLE queries that will fail, also causing replication to stop.
:::warning
It is easy to break replication with table overrides if not used with care. For example:

* If an ALIAS column is added with a table override, and a column with the same name is later added to the source
  MySQL table, the converted ALTER TABLE query in ClickHouse will fail and replication stops.
* It is currently possible to add overrides that reference nullable columns where not-nullable are required, such as in
  `ORDER BY` or `PARTITION BY`. This will cause CREATE TABLE queries that will fail, also causing replication to stop.
:::

## Examples of Use {#examples-of-use}

@ -1,6 +1,6 @@
---
toc_priority: 30
toc_title: MaterializedPostgreSQL
sidebar_label: MaterializedPostgreSQL
sidebar_position: 60
---

# [experimental] MaterializedPostgreSQL {#materialize-postgresql}
@ -46,7 +46,9 @@ After `MaterializedPostgreSQL` database is created, it does not automatically de
ATTACH TABLE postgres_database.new_table;
```

Warning: before version 22.1 adding table to replication left unremoved temprorary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in clickhouse version before 22.1, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`). Otherwise disk usage will grow. Issue is fixed in 22.1.
:::warning
Before version 22.1, adding a table to replication left an unremoved temporary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in ClickHouse version before 22.1, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`). Otherwise disk usage will grow. This issue is fixed in 22.1.
:::

## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}

@ -135,69 +137,70 @@ FROM pg_class
WHERE oid = 'postgres_table'::regclass;
```

!!! warning "Warning"
    Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::warning
Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::

## Settings {#settings}

1. materialized_postgresql_tables_list {#materialized-postgresql-tables-list}
1. `materialized_postgresql_tables_list` {#materialized-postgresql-tables-list}

    Sets a comma-separated list of PostgreSQL database tables, which will be replicated via [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) database engine.

    Default value: empty list — means the whole PostgreSQL database will be replicated.

2. materialized_postgresql_schema {#materialized-postgresql-schema}
2. `materialized_postgresql_schema` {#materialized-postgresql-schema}

    Default value: empty string. (Default schema is used)

3. materialized_postgresql_schema_list {#materialized-postgresql-schema-list}
3. `materialized_postgresql_schema_list` {#materialized-postgresql-schema-list}

    Default value: empty list. (Default schema is used)

4. materialized_postgresql_allow_automatic_update {#materialized-postgresql-allow-automatic-update}
4. `materialized_postgresql_allow_automatic_update` {#materialized-postgresql-allow-automatic-update}

    Do not use this setting before version 22.1.

    Allows reloading table in the background, when schema changes are detected. DDL queries on the PostgreSQL side are not replicated via ClickHouse [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) engine, because it is not allowed with PostgreSQL logical replication protocol, but the fact of DDL changes is detected transactionally. In this case, the default behaviour is to stop replicating those tables once DDL is detected. However, if this setting is enabled, then, instead of stopping the replication of those tables, they will be reloaded in the background via database snapshot without data losses and replication will continue for them.

    Possible values:

    - 0 — The table is not automatically updated in the background, when schema changes are detected.
    - 1 — The table is automatically updated in the background, when schema changes are detected.

    Default value: `0`.

5. materialized_postgresql_max_block_size {#materialized-postgresql-max-block-size}
5. `materialized_postgresql_max_block_size` {#materialized-postgresql-max-block-size}

    Sets the number of rows collected in memory before flushing data into PostgreSQL database table.

    Possible values:

    - Positive integer.

    Default value: `65536`.

6. materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot}
6. `materialized_postgresql_replication_slot` {#materialized-postgresql-replication-slot}

    A user-created replication slot. Must be used together with `materialized_postgresql_snapshot`.

7. materialized_postgresql_snapshot {#materialized-postgresql-snapshot}
7. `materialized_postgresql_snapshot` {#materialized-postgresql-snapshot}

    A text string identifying a snapshot, from which [initial dump of PostgreSQL tables](../../engines/database-engines/materialized-postgresql.md) will be performed. Must be used together with `materialized_postgresql_replication_slot`.

``` sql
CREATE DATABASE database1
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
SETTINGS materialized_postgresql_tables_list = 'table1,table2,table3';

SELECT * FROM database1.table1;
```

The settings can be changed, if necessary, using a DDL query. But it is impossible to change the setting `materialized_postgresql_tables_list`. To update the list of tables in this setting use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
```

## Notes {#notes}

@ -213,47 +216,47 @@ Please note that this should be used only if it is actually needed. If there is

1. Configure replication slot in PostgreSQL.

    ```yaml
    apiVersion: "acid.zalan.do/v1"
    kind: postgresql
    metadata:
      name: acid-demo-cluster
    spec:
      numberOfInstances: 2
      postgresql:
        parameters:
          wal_level: logical
      patroni:
        slots:
          clickhouse_sync:
            type: logical
            database: demodb
            plugin: pgoutput
    ```

2. Wait for replication slot to be ready, then begin a transaction and export the transaction snapshot identifier:

    ```sql
    BEGIN;
    SELECT pg_export_snapshot();
    ```

3. In ClickHouse create database:

    ```sql
    CREATE DATABASE demodb
    ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
    SETTINGS
        materialized_postgresql_replication_slot = 'clickhouse_sync',
        materialized_postgresql_snapshot = '0000000A-0000023F-3',
        materialized_postgresql_tables_list = 'table1,table2,table3';
    ```

4. End the PostgreSQL transaction once replication to ClickHouse DB is confirmed. Verify that replication continues after failover:

    ```bash
    kubectl exec acid-demo-cluster-0 -c postgres -- su postgres -c 'patronictl failover --candidate acid-demo-cluster-1 --force'
    ```

### Required permissions

@ -1,9 +1,9 @@
---
toc_priority: 30
toc_title: MySQL
sidebar_position: 50
sidebar_label: MySQL
---

# MySQL {#mysql}
# MySQL

Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.

@ -49,8 +49,6 @@ ENGINE = MySQL('host:port', ['database' | database], 'user', 'password')

All other MySQL data types are converted into [String](../../sql-reference/data-types/string.md).

Because the ClickHouse date type has a different range from the MySQL date range, if a MySQL date value is out of the range of the ClickHouse date, you can use the setting `mysql_datatypes_support_level` to modify the mapping from the MySQL date type to the ClickHouse date type: `date2Date32` (convert MySQL's date type to ClickHouse `Date32`), `date2String` (convert MySQL's date type to ClickHouse `String`; this is usually used when your MySQL data is earlier than 1925), or `default` (convert MySQL's date type to ClickHouse `Date`).

[Nullable](../../sql-reference/data-types/nullable.md) is supported.

## Global Variables Support {#global-variables-support}
@ -61,8 +59,9 @@ These variables are supported:
- `version`
- `max_allowed_packet`

!!! warning "Warning"
    By now these variables are stubs and don't correspond to anything.
:::warning
For now, these variables are stubs and do not correspond to anything.
:::

Example:
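
A minimal sketch of querying one of the stub variables (as noted above, the returned value is not meaningful):

```sql
SELECT @@max_allowed_packet;
```
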
@ -1,6 +1,6 @@
---
toc_priority: 35
toc_title: PostgreSQL
sidebar_position: 40
sidebar_label: PostgreSQL
---

# PostgreSQL {#postgresql}

@ -1,6 +1,6 @@
---
toc_priority: 36
toc_title: Replicated
sidebar_position: 30
sidebar_label: Replicated
---

# [experimental] Replicated {#replicated}
@ -20,8 +20,9 @@ One ClickHouse server can have multiple replicated databases running and updatin
- `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`.
- `replica_name` — Replica name. Replica names must be different for all replicas of the same shard.

!!! note "Warning"
    For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.
:::warning
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.
:::
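
A minimal sketch of creating a `Replicated` database with these arguments (the ZooKeeper path, shard name, and replica name are illustrative):

```sql
-- Each replica of the database registers itself under the same ZooKeeper path.
CREATE DATABASE r
ENGINE = Replicated('/clickhouse/databases/r', 'shard1', 'replica1');
```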

## Specifics and Recommendations {#specifics-and-recommendations}

@ -1,6 +1,6 @@
---
toc_priority: 32
toc_title: SQLite
sidebar_position: 55
sidebar_label: SQLite
---

# SQLite {#sqlite}

@ -1,15 +0,0 @@
---
toc_folder_title: Engines
toc_hidden: true
toc_priority: 25
toc_title: hidden
---

# ClickHouse Engines {#clickhouse-engines}

There are two key engine kinds in ClickHouse:

- [Table engines](../engines/table-engines/index.md)
- [Database engines](../engines/database-engines/index.md)

{## [Original article](https://clickhouse.com/docs/en/engines/) ##}
@ -1,6 +1,6 @@
---
toc_priority: 12
toc_title: ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
---

# ExternalDistributed {#externaldistributed}
@ -51,3 +51,6 @@ You can specify any number of shards and any number of replicas for each shard.
- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
- [Distributed table engine](../../../engines/table-engines/special/distributed.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/ExternalDistributed/) <!--hide-->

@ -1,6 +1,6 @@
---
toc_priority: 9
toc_title: EmbeddedRocksDB
sidebar_position: 9
sidebar_label: EmbeddedRocksDB
---

# EmbeddedRocksDB Engine {#EmbeddedRocksDB-engine}

@ -1,6 +1,6 @@
---
toc_priority: 6
toc_title: HDFS
sidebar_position: 6
sidebar_label: HDFS
---

# HDFS {#table_engines-hdfs}
@ -98,8 +98,9 @@ Table consists of all the files in both directories (all files should satisfy fo
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
```

!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::warning
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::

**Example**

@ -1,6 +1,6 @@
---
toc_priority: 4
toc_title: Hive
sidebar_position: 4
sidebar_label: Hive
---

# Hive {#hive}
@ -137,7 +137,7 @@ CREATE TABLE test.test_orc
`f_array_array_float` Array(Array(Float32)),
`day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_orc')
ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc')
PARTITION BY day

```
@ -406,3 +406,5 @@ f_char: hello world
f_bool: true
day: 2021-09-18
```

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/hive/) <!--hide-->

@ -1,6 +1,6 @@
---
toc_folder_title: Integrations
toc_priority: 1
sidebar_position: 40
sidebar_label: Integrations
---

# Table Engines for Integrations {#table-engines-for-integrations}

@ -1,6 +1,6 @@
---
toc_priority: 3
toc_title: JDBC
sidebar_position: 3
sidebar_label: JDBC
---

# JDBC {#table-engine-jdbc}

@ -1,6 +1,6 @@
---
toc_priority: 8
toc_title: Kafka
sidebar_position: 8
sidebar_label: Kafka
---

# Kafka {#kafka}
@ -87,8 +87,9 @@ Examples:

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.
:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::

``` sql
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
@ -133,7 +134,7 @@ Example:

SELECT level, sum(total) FROM daily GROUP BY level;
```
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings/#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.

To stop receiving topic data or to change the conversion logic, detach the materialized view:
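
A minimal sketch of the detach/re-attach cycle (the view name `consumer` is illustrative):

```sql
DETACH TABLE consumer;   -- stop consuming from the topic
ATTACH TABLE consumer;   -- resume; the consumer group continues from its committed offsets
```
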
@ -1,6 +1,6 @@
---
toc_priority: 12
toc_title: MaterializedPostgreSQL
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
---

# MaterializedPostgreSQL {#materialize-postgresql}
@ -52,5 +52,8 @@ PRIMARY KEY key;
SELECT key, value, _version FROM postgresql_db.postgresql_replica;
```

!!! warning "Warning"
    Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::warning
Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/materialized-postgresql) <!--hide-->

@ -1,6 +1,6 @@
---
toc_priority: 5
toc_title: MongoDB
sidebar_position: 5
sidebar_label: MongoDB
---

# MongoDB {#mongodb}

@ -1,6 +1,6 @@
---
toc_priority: 4
toc_title: MySQL
sidebar_position: 4
sidebar_label: MySQL
---

# MySQL {#mysql}
@ -148,3 +148,5 @@ Default value: `16`.

- [The mysql table function](../../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->

@ -1,6 +1,6 @@
---
toc_priority: 2
toc_title: ODBC
sidebar_position: 2
sidebar_label: ODBC
---

# ODBC {#table-engine-odbc}

@ -1,6 +1,6 @@
---
toc_priority: 11
toc_title: PostgreSQL
sidebar_position: 11
sidebar_label: PostgreSQL
---

# PostgreSQL {#postgresql}
@ -73,8 +73,9 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp

PostgreSQL `Array` types are converted into ClickHouse arrays.

!!! info "Note"
    Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.
:::warning
Be careful: in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different table rows of the same column. In ClickHouse, it is only allowed to have multidimensional arrays with the same number of dimensions in all table rows of the same column.
:::

Supports multiple replicas that must be listed by `|`. For example:
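
A minimal sketch of the replica list syntax (hosts, database, table, and credentials are illustrative):

```sql
CREATE TABLE test_replicas (id UInt32, name String)
ENGINE = PostgreSQL('postgres{1|2|3}:5432', 'postgres_db', 'test_replicas', 'postgres', 'password');
```
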
@ -1,6 +1,6 @@
---
toc_priority: 10
toc_title: RabbitMQ
sidebar_position: 10
sidebar_label: RabbitMQ
---

# RabbitMQ Engine {#rabbitmq-engine}

@ -1,6 +1,6 @@
---
toc_priority: 7
toc_title: S3
sidebar_position: 7
sidebar_label: S3
---

# S3 Table Engine {#table-engine-s3}
@ -66,8 +66,9 @@ For more information about virtual columns see [here](../../../engines/table-eng

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::warning
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::

**Example with wildcards 1**

@ -158,3 +159,5 @@ The following settings can be specified in configuration file for given endpoint
## See also

- [s3 table function](../../../sql-reference/table-functions/s3.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/s3/) <!--hide-->

@ -1,6 +1,6 @@
---
toc_priority: 7
toc_title: SQLite
sidebar_position: 7
sidebar_label: SQLite
---

# SQLite {#sqlite}
@ -56,4 +56,7 @@ SELECT * FROM sqlite_db.table2 ORDER BY col1;
**See Also**

- [SQLite](../../../engines/database-engines/sqlite.md) engine
- [sqlite](../../../sql-reference/table-functions/sqlite.md) table function

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/sqlite/) <!--hide-->

@ -1,7 +1,6 @@
---
toc_folder_title: Log Family
toc_priority: 29
toc_title: Introduction
sidebar_position: 20
sidebar_label: Log Family
---

# Log Engine Family {#log-engine-family}

@ -10,3 +10,6 @@ The engine belongs to the family of `Log` engines. See the common properties of
`Log` differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of "marks" resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
The `Log` engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The `Log` engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.
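
A minimal sketch of such a table (the schema is illustrative):

```sql
-- A simple append-only table suitable for temporary or write-once data.
CREATE TABLE log_example (id UInt64, message String)
ENGINE = Log;
```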

[Original article](https://clickhouse.com/docs/en/engines/table-engines/log-family/log/) <!--hide-->

@ -1,6 +1,6 @@
---
toc_priority: 35
toc_title: AggregatingMergeTree
sidebar_position: 60
sidebar_label: AggregatingMergeTree
---

# AggregatingMergeTree {#aggregatingmergetree}
@ -42,8 +42,9 @@ When creating a `AggregatingMergeTree` table the same [clauses](../../../engines

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

@ -1,6 +1,6 @@
---
toc_priority: 36
toc_title: CollapsingMergeTree
sidebar_position: 70
sidebar_label: CollapsingMergeTree
---

# CollapsingMergeTree {#table_engine-collapsingmergetree}
@ -42,8 +42,9 @@ When creating a `CollapsingMergeTree` table, the same [query clauses](../../../e

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

@ -1,12 +1,15 @@
---
toc_priority: 32
toc_title: Custom Partitioning Key
sidebar_position: 30
sidebar_label: Custom Partitioning Key
---

# Custom Partitioning Key {#custom-partitioning-key}

!!! warning "Warning"
    In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).
:::warning
In most cases you do not need a partition key, and in most other cases you do not need a partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression).

You should never use partitioning that is too granular. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression.
:::
|
||||
|
||||
Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well.
|
||||
|
||||
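For orientation (not part of the diff, names invented), a monthly partition key might be declared like this:

``` sql
-- Illustrative sketch: partition by month via toYYYYMM.
CREATE TABLE IF NOT EXISTS visits
(
    VisitDate Date,
    Hour UInt8,
    ClientID UUID
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(VisitDate)
ORDER BY Hour;
```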
@@ -40,8 +43,9 @@ By default, the floating-point partition key is not supported. To use it enable

When inserting new data to a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part.

!!! info "Info"
    A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.
:::info
A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.
:::

Use the [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) table to view the table parts and partitions. For example, let’s assume that we have a `visits` table with partitioning by month. Let’s perform the `SELECT` query for the `system.parts` table:
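The query itself falls outside this hunk; it would look roughly like the following (table name as in the example above):

``` sql
SELECT
    partition,
    name,
    active
FROM system.parts
WHERE table = 'visits';
```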
@@ -78,8 +82,9 @@ Let’s break down the name of the part: `201901_1_9_2_11`:
- `2` is the chunk level (the depth of the merge tree it is formed from).
- `11` is the mutation version (if a part mutated)

!!! info "Info"
    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
:::info
The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
:::

The `active` column shows the status of the part. `1` is active; `0` is inactive. The inactive parts are, for example, source parts remaining after merging to a larger part. The corrupted data parts are also indicated as inactive.
@@ -1,6 +1,6 @@
---
toc_priority: 38
toc_title: GraphiteMergeTree
sidebar_position: 90
sidebar_label: GraphiteMergeTree
---

# GraphiteMergeTree {#graphitemergetree}

@@ -54,8 +54,9 @@ When creating a `GraphiteMergeTree` table, the same [clauses](../../../engines/t

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@@ -119,12 +120,13 @@ default
...
```

!!! warning "Attention"
    Patterns must be strictly ordered:

    1. Patterns without `function` or `retention`.
    1. Patterns with both `function` and `retention`.
    1. Pattern `default`.
:::warning
Patterns must be strictly ordered:

1. Patterns without `function` or `retention`.
1. Patterns with both `function` and `retention`.
1. Pattern `default`.
:::

When processing a row, ClickHouse checks the rules in the `pattern` sections. Each of `pattern` (including `default`) sections can contain `function` parameter for aggregation, `retention` parameters or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.

@@ -253,7 +255,6 @@ Valid values:
```

!!! warning "Warning"
    Data rollup is performed during merges. Usually, for old partitions, merges are not started, so for rollup it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md). Or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).

[Original article](https://clickhouse.com/docs/en/operations/table_engines/graphitemergetree/) <!--hide-->
:::warning
Data rollup is performed during merges. Usually, for old partitions, merges are not started, so for rollup it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md). Or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
:::
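For reference (not part of the diff), triggering such an unscheduled merge might look like this; the table name and partition are invented:

``` sql
-- Force a merge (and therefore rollup) for one partition.
OPTIMIZE TABLE graphite_data PARTITION 201901 FINAL;
```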
@@ -1,7 +1,6 @@
---
toc_folder_title: MergeTree Family
toc_priority: 28
toc_title: Introduction
sidebar_position: 10
sidebar_label: MergeTree Family
---

# MergeTree Engine Family {#mergetree-engine-family}
@@ -1,6 +1,6 @@
---
toc_priority: 30
toc_title: MergeTree
sidebar_position: 11
sidebar_label: MergeTree
---

# MergeTree {#table_engines-mergetree}

@@ -27,8 +27,9 @@ Main features:

If necessary, you can set the data sampling method in the table.

!!! info "Info"
    The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family.
:::info
The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family.
:::

## Creating a Table {#table_engine-mergetree-creating-a-table}
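The full statement that follows this heading is elided from the diff; as rough orientation, a minimal table on this engine might look like the sketch below (names invented):

``` sql
CREATE TABLE IF NOT EXISTS events
(
    EventDate Date,
    EventID UInt64,
    Payload String
)
ENGINE = MergeTree()
ORDER BY (EventDate, EventID);
```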
@@ -127,8 +128,9 @@ The `index_granularity` setting can be omitted because 8192 is the default value

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.
:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

@@ -304,8 +306,8 @@ CREATE TABLE table_name

Indices from the example can be used by ClickHouse to reduce the amount of data to read from disk in the following queries:

``` sql
SELECT count() FROM table WHERE s < 'z'
SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
SELECT count() FROM table WHERE s < 'z'
SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
```

#### Available Types of Indices {#available-types-of-indices}
@@ -339,7 +341,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234

For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.

The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall).
The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).

Example of index creation for `Map` data type
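The example announced above sits outside this hunk; it presumably resembles the following sketch, with invented names:

``` sql
CREATE TABLE IF NOT EXISTS map_example
(
    attrs Map(String, String),
    INDEX attrs_keys mapKeys(attrs) TYPE bloom_filter GRANULARITY 1,
    INDEX attrs_values mapValues(attrs) TYPE bloom_filter GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();
```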
@@ -364,26 +366,28 @@ The `set` index can be used with all functions. Function subsets for other index

| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [in](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greater (>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |

Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization.

!!! note "Note"
    Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes can’t be used for optimizing queries where the result of a function is expected to be false, for example:
:::note
Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes can not be used for optimizing queries where the result of a function is expected to be false.

For example:

- Can be optimized:
    - `s LIKE '%test%'`
@@ -391,12 +395,13 @@ Functions with a constant argument that is less than ngram size can’t be used
    - `s = 1`
    - `NOT s != 1`
    - `startsWith(s, 'test')`
- Can’t be optimized:
- Can not be optimized:
    - `NOT s LIKE '%test%'`
    - `s NOT LIKE '%test%'`
    - `NOT s = 1`
    - `s != 1`
    - `NOT startsWith(s, 'test')`
:::
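As a side note (not in the diff), an index of the kind covered by the table above might be declared and exercised like this; names and bloom-filter parameters are invented:

``` sql
CREATE TABLE IF NOT EXISTS text_events
(
    s String,
    INDEX s_tokens s TYPE tokenbf_v1(4096, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree()
ORDER BY tuple();

-- A positive match is a candidate for index-based pruning:
SELECT count() FROM text_events WHERE s LIKE '%test%';
```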
## Projections {#projections}
Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined in part-level. It provides consistency guarantees along with automatic usage in queries.
@@ -1,6 +1,6 @@
---
toc_priority: 33
toc_title: ReplacingMergeTree
sidebar_position: 40
sidebar_label: ReplacingMergeTree
---

# ReplacingMergeTree {#replacingmergetree}

@@ -29,8 +29,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

For a description of request parameters, see [statement description](../../../sql-reference/statements/create/table.md).

!!! note "Attention"
    Uniqueness of rows is determined by the `ORDER BY` table section, not `PRIMARY KEY`.
:::warning
Uniqueness of rows is determined by the `ORDER BY` table section, not `PRIMARY KEY`.
:::
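As an illustration (not part of the diff, names invented), a table that deduplicates by the sorting key, keeping the row with the highest `ver`:

``` sql
CREATE TABLE IF NOT EXISTS kv
(
    key UInt64,
    value String,
    ver UInt32
)
ENGINE = ReplacingMergeTree(ver)
ORDER BY key;
```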

**ReplacingMergeTree Parameters**

@@ -49,8 +50,9 @@ When creating a `ReplacingMergeTree` table the same [clauses](../../../engines/t

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

@@ -1,6 +1,6 @@
---
toc_priority: 31
toc_title: Data Replication
sidebar_position: 20
sidebar_label: Data Replication
---

# Data Replication {#table_engines-replication}

@@ -31,8 +31,9 @@ ClickHouse uses [Apache ZooKeeper](https://zookeeper.apache.org) for storing rep

To use replication, set parameters in the [zookeeper](../../../operations/server-configuration-parameters/settings.md#server-settings_zookeeper) server configuration section.

!!! attention "Attention"
    Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
:::warning
Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
:::

Example of setting the addresses of the ZooKeeper cluster:
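The configuration listing itself is elided from this hunk. On the SQL side (not part of the diff), a replicated table might be declared as follows; the ZooKeeper path and macros are invented:

``` sql
CREATE TABLE IF NOT EXISTS visits_replicated
(
    EventDate Date,
    CounterID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/visits_replicated', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate);
```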
@@ -1,6 +1,6 @@
---
toc_priority: 34
toc_title: SummingMergeTree
sidebar_position: 50
sidebar_label: SummingMergeTree
---

# SummingMergeTree {#summingmergetree}

@@ -41,8 +41,9 @@ When creating a `SummingMergeTree` table the same [clauses](../../../engines/tab

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
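
-- Not part of the diff above: a hedged sketch of the current syntax with
-- invented names. On merges, rows with the same sorting key are collapsed
-- and the numeric column is summed.
CREATE TABLE IF NOT EXISTS summtt
(
    key UInt32,
    value UInt32
)
ENGINE = SummingMergeTree(value)
ORDER BY key;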
@@ -1,6 +1,6 @@
---
toc_priority: 37
toc_title: VersionedCollapsingMergeTree
sidebar_position: 80
sidebar_label: VersionedCollapsingMergeTree
---

# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}

@@ -53,8 +53,9 @@ When creating a `VersionedCollapsingMergeTree` table, the same [clauses](../../.

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch the old projects to the method described above.
:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
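
-- Not part of the diff above: a hedged sketch of the current syntax with
-- invented names. Sign marks state (1) vs. cancel (-1) rows; Version lets
-- rows that arrive out of order collapse correctly.
CREATE TABLE IF NOT EXISTS uact_versioned
(
    UserID UInt64,
    PageViews UInt8,
    Sign Int8,
    Version UInt8
)
ENGINE = VersionedCollapsingMergeTree(Sign, Version)
ORDER BY UserID;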
Some files were not shown because too many files have changed in this diff.