Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-17 21:24:28 +00:00)

Commit c78dd2cd79: "Merged with master"
@@ -287,7 +287,7 @@ endif ()
include(cmake/dbms_glob_sources.cmake)

-if (OS_LINUX)
+if (OS_LINUX OR OS_ANDROID)
    include(cmake/linux/default_libs.cmake)
elseif (OS_DARWIN)
    include(cmake/darwin/default_libs.cmake)
@@ -16,5 +16,4 @@ ClickHouse is an open-source column-oriented database management system that all
## Upcoming Events

-* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-july-virtual-meetup-tickets-111199787558) on July 15, 2020.
-* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 17, 2020.
+* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 31, 2020.
@@ -30,7 +30,7 @@ struct StringRef
    constexpr StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast<const char *>(data_)), size(size_) {}

    StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
-    constexpr StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {}
+    constexpr explicit StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {}
    constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
    constexpr StringRef() = default;
@@ -1,6 +1,9 @@
#include <common/getThreadId.h>

-#if defined(OS_LINUX)
+#if defined(OS_ANDROID)
+#include <sys/types.h>
+#include <unistd.h>
+#elif defined(OS_LINUX)
#include <unistd.h>
#include <syscall.h>
#elif defined(OS_FREEBSD)

@@ -16,7 +19,9 @@ uint64_t getThreadId()
{
    if (!current_tid)
    {
-#if defined(OS_LINUX)
+#if defined(OS_ANDROID)
+        current_tid = gettid();
+#elif defined(OS_LINUX)
        current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
#elif defined(OS_FREEBSD)
        current_tid = pthread_getthreadid_np();
@@ -9,7 +9,6 @@
#include <string.h>
#include <signal.h>
#include <cxxabi.h>
#include <execinfo.h>
#include <unistd.h>

#include <typeinfo>
@@ -7,7 +7,7 @@
#
# Sets values of:
# OPENLDAP_FOUND - TRUE if found
-# OPENLDAP_INCLUDE_DIR - path to the include directory
+# OPENLDAP_INCLUDE_DIRS - paths to the include directories
# OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries
# OPENLDAP_LDAP_LIBRARY - paths to the libldap library
# OPENLDAP_LBER_LIBRARY - paths to the liblber library

@@ -28,11 +28,11 @@ if(OPENLDAP_USE_REENTRANT_LIBS)
endif()

if(OPENLDAP_ROOT_DIR)
-    find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
+    find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
    find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
    find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
else()
-    find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h")
+    find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h")
    find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}")
    find_library(OPENLDAP_LBER_LIBRARY NAMES "lber")
endif()

@@ -44,10 +44,10 @@ set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
    OpenLDAP DEFAULT_MSG
-    OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
+    OPENLDAP_INCLUDE_DIRS OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
)

-mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
+mark_as_advanced(OPENLDAP_INCLUDE_DIRS OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)

if(OPENLDAP_USE_STATIC_LIBS)
    set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES})
@@ -1,4 +1,5 @@
-SET(ENABLE_AMQPCPP ${ENABLE_LIBRARIES})
+option(ENABLE_AMQPCPP "Enable AMQP-CPP" ${ENABLE_LIBRARIES})

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
    message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
    set (ENABLE_AMQPCPP 0)
@@ -1,3 +1,7 @@
+option (ENABLE_GTEST_LIBRARY "Enable gtest library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_GTEST_LIBRARY)
+
option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test instead of bundled" ${NOT_UNBUNDLED})

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt")

@@ -28,4 +32,6 @@ if((GTEST_INCLUDE_DIRS AND GTEST_BOTH_LIBRARIES) OR GTEST_SRC_DIR)
    set(USE_GTEST 1)
endif()

+endif()
+
message (STATUS "Using gtest=${USE_GTEST}: ${GTEST_INCLUDE_DIRS} : ${GTEST_BOTH_LIBRARIES} : ${GTEST_SRC_DIR}")
@@ -16,12 +16,17 @@ if (ENABLE_LDAP)
    set (OPENLDAP_USE_REENTRANT_LIBS 1)

    if (NOT USE_INTERNAL_LDAP_LIBRARY)
+        if (OPENLDAP_USE_STATIC_LIBS)
+            message (WARNING "Unable to use external static OpenLDAP libraries, falling back to the bundled version.")
+            set (USE_INTERNAL_LDAP_LIBRARY 1)
+        else ()
            if (APPLE AND NOT OPENLDAP_ROOT_DIR)
                set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
            endif ()

            find_package (OpenLDAP)
+        endif ()
    endif ()

    if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
        string (TOLOWER "${CMAKE_SYSTEM_NAME}" _system_name)

@@ -54,7 +59,10 @@ if (ENABLE_LDAP)
    else ()
        set (USE_INTERNAL_LDAP_LIBRARY 1)
        set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap")
-        set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include")
+        set (OPENLDAP_INCLUDE_DIRS
+            "${ClickHouse_SOURCE_DIR}/contrib/openldap-cmake/${_system_name}_${_system_processor}/include"
+            "${ClickHouse_SOURCE_DIR}/contrib/openldap/include"
+        )
        # Below, 'ldap'/'ldap_r' and 'lber' will be resolved to
        # the targets defined in contrib/openldap-cmake/CMakeLists.txt
        if (OPENLDAP_USE_REENTRANT_LIBS)

@@ -73,4 +81,4 @@ if (ENABLE_LDAP)
    endif ()
endif ()

-message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}")
+message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIRS} : ${OPENLDAP_LIBRARIES}")
@@ -1,3 +1,7 @@
+option(ENABLE_GSASL_LIBRARY "Enable gsasl library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_GSASL_LIBRARY)
+
option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")

@@ -24,4 +28,6 @@ if(LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR)
    set (USE_LIBGSASL 1)
endif()

+endif()
+
message (STATUS "Using libgsasl=${USE_LIBGSASL}: ${LIBGSASL_INCLUDE_DIR} : ${LIBGSASL_LIBRARY}")
@@ -1,3 +1,7 @@
+option (ENABLE_MSGPACK "Enable msgpack library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_MSGPACK)
+
option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED})

if (USE_INTERNAL_MSGPACK_LIBRARY)

@@ -14,4 +18,10 @@ else()
    find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS})
endif()

-message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}")
+if (MSGPACK_INCLUDE_DIR)
+    set(USE_MSGPACK 1)
+endif()
+
+endif()
+
+message(STATUS "Using msgpack=${USE_MSGPACK}: ${MSGPACK_INCLUDE_DIR}")
@@ -1,17 +1,8 @@
-if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include/simdjson/jsonparser.h")
+if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include/simdjson.h")
    message (WARNING "submodule contrib/simdjson is missing. to fix try run: \n git submodule update --init --recursive")
    return()
endif ()

-if (NOT HAVE_SSE42)
-    message (WARNING "submodule contrib/simdjson requires support of SSE4.2 instructions")
-    return()
-elseif (NOT HAVE_PCLMULQDQ)
-    message (WARNING "submodule contrib/simdjson requires support of PCLMULQDQ instructions")
-    return()
-endif ()
-
option (USE_SIMDJSON "Use simdjson" ON)
-set (SIMDJSON_LIBRARY "simdjson")

-message(STATUS "Using simdjson=${USE_SIMDJSON}: ${SIMDJSON_LIBRARY}")
+message(STATUS "Using simdjson=${USE_SIMDJSON}")
@@ -11,7 +11,12 @@ else ()
    set (BUILTINS_LIBRARY "-lgcc")
endif ()

+if (OS_ANDROID)
+    # pthread and rt are included in libc
+    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
+else ()
    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
+endif ()

message(STATUS "Default libraries: ${DEFAULT_LIBS}")

@@ -35,7 +40,11 @@ add_library(global-libs INTERFACE)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

+if (NOT OS_ANDROID)
+    # Our compatibility layer doesn't build under Android, many errors in musl.
    add_subdirectory(base/glibc-compatibility)
+endif ()

include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)
@@ -1,6 +1,11 @@
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
    set (OS_LINUX 1)
    add_definitions(-D OS_LINUX)
+elseif (CMAKE_SYSTEM_NAME MATCHES "Android")
+    # This is a toy configuration and not in CI, so expect it to be broken.
+    # Use cmake flags such as: -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28
+    set (OS_ANDROID 1)
+    add_definitions(-D OS_ANDROID)
elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
    set (OS_FREEBSD 1)
    add_definitions(-D OS_FREEBSD)

@@ -17,7 +22,7 @@ if (CMAKE_CROSSCOMPILING)
        set (ENABLE_PARQUET OFF CACHE INTERNAL "")
        set (ENABLE_ICU OFF CACHE INTERNAL "")
        set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
-    elseif (OS_LINUX)
+    elseif (OS_LINUX OR OS_ANDROID)
        if (ARCH_AARCH64)
            # FIXME: broken dependencies
            set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
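For reference, an Android cross-build along the lines of the comment above might be invoked as in the sketch below. This is an assumption-laden illustration: the NDK path comes from the comment itself, and the comment warns the configuration is a toy one not covered by CI.

```bash
# Hypothetical Android cross-build using the flags from the comment above.
# The NDK path is illustrative; point it at your local android-ndk-r21d.
mkdir build-android && cd build-android
cmake .. \
    -DCMAKE_TOOLCHAIN_FILE=~/android-ndk-r21d/build/cmake/android.toolchain.cmake \
    -DANDROID_ABI=arm64-v8a \
    -DANDROID_PLATFORM=28
ninja
```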
@@ -22,7 +22,7 @@ elseif (COMPILER_CLANG)
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
        message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
    elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0)
-        # char8_t is available staring (upstream vanilla) Clang 7, but prior to Clang 8,
+        # char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8,
        # it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t.
        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t")
        set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
contrib/CMakeLists.txt (vendored): 2 changes
@@ -102,7 +102,7 @@ if (USE_INTERNAL_SSL_LIBRARY)
    add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
endif ()

-if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY)
+if (USE_INTERNAL_LDAP_LIBRARY)
    add_subdirectory (openldap-cmake)
endif ()
contrib/simdjson (vendored submodule): 2 changes
@@ -1 +1 @@
-Subproject commit 560f0742cc0895d00d78359dbdeb82064a24adb8
+Subproject commit 1e4aa116e5a39e4ba23b9a93e6c7f048c5105b20
@@ -1,14 +1,6 @@
set(SIMDJSON_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include")
-set(SIMDJSON_SRC_DIR "${SIMDJSON_INCLUDE_DIR}/../src")
-set(SIMDJSON_SRC
-    ${SIMDJSON_SRC_DIR}/document.cpp
-    ${SIMDJSON_SRC_DIR}/error.cpp
-    ${SIMDJSON_SRC_DIR}/implementation.cpp
-    ${SIMDJSON_SRC_DIR}/jsonioutil.cpp
-    ${SIMDJSON_SRC_DIR}/jsonminifier.cpp
-    ${SIMDJSON_SRC_DIR}/stage1_find_marks.cpp
-    ${SIMDJSON_SRC_DIR}/stage2_build_tape.cpp
-)
+set(SIMDJSON_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/src")
+set(SIMDJSON_SRC ${SIMDJSON_SRC_DIR}/simdjson.cpp)

-add_library(${SIMDJSON_LIBRARY} ${SIMDJSON_SRC})
-target_include_directories(${SIMDJSON_LIBRARY} SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}")
+add_library(simdjson ${SIMDJSON_SRC})
+target_include_directories(simdjson SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}")
@@ -88,6 +88,10 @@
        "name": "yandex/clickhouse-testflows-runner",
        "dependent": []
    },
+    "docker/test/fasttest": {
+        "name": "yandex/clickhouse-fasttest",
+        "dependent": []
+    },
    "docker/test/integration/s3_proxy": {
        "name": "yandex/clickhouse-s3-proxy",
        "dependent": []

@@ -95,5 +99,9 @@
    "docker/test/integration/resolver": {
        "name": "yandex/clickhouse-python-bottle",
        "dependent": []
+    },
+    "docker/test/integration/helper_container": {
+        "name": "yandex/clickhouse-integration-helper",
+        "dependent": []
    }
}
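Each entry in this manifest maps a Dockerfile directory to the image name CI builds from it; the two added entries register the new fasttest and integration-helper images. A quick, illustrative way to inspect the mapping locally (assumes `jq` is installed):

```bash
# List image directories and their image names from the manifest.
jq -r 'to_entries[] | "\(.key) -> \(.value.name)"' docker/images.json
```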
@@ -48,8 +48,9 @@ then
    mkdir /output/ch
    git -C /output/ch init --bare
    git -C /output/ch remote add origin /build
-    git -C /output/ch fetch --no-tags --depth 50 origin HEAD
-    git -C /output/ch reset --soft FETCH_HEAD
+    git -C /output/ch fetch --no-tags --depth 50 origin HEAD:pr
+    git -C /output/ch fetch --no-tags --depth 50 origin master:master
+    git -C /output/ch reset --soft pr
    git -C /output/ch log -5
fi
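A plausible motivation for fetching the PR head and master as local branches — an assumption, based on how the performance-comparison script later in this commit uses the same branch names — is that downstream tooling can then compute the fork point in one step:

```bash
# Illustrative: with local 'pr' and 'master' branches, the merge base is one command away.
git -C /output/ch merge-base master pr
```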
docker/test/fasttest/Dockerfile (new file, 65 lines)
@@ -0,0 +1,65 @@
# docker build -t yandex/clickhouse-fasttest .
FROM ubuntu:19.10

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''

RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg
RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list

RUN apt-get --allow-unauthenticated update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get --allow-unauthenticated install --yes --no-install-recommends \
            bash \
            fakeroot \
            ccache \
            software-properties-common \
            apt-transport-https \
            ca-certificates \
            wget \
            bash \
            fakeroot \
            cmake \
            ccache \
            llvm-10 \
            clang-10 \
            lld-10 \
            clang-tidy-10 \
            ninja-build \
            gperf \
            git \
            tzdata \
            gperf \
            rename \
            build-essential \
            expect \
            python \
            python-lxml \
            python-termcolor \
            python-requests \
            unixodbc \
            qemu-user-static \
            sudo \
            moreutils \
            curl \
            brotli

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && wget --quiet -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && rm -rf /tmp/clickhouse-odbc-tmp

# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld

ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
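As the header comment suggests, the image is built from this directory. A local run might look like the sketch below; the volume mounts are assumptions based on the `/test_output` and `/ccache` paths that `run.sh` writes to:

```bash
# Illustrative local build and run of the fasttest image.
docker build -t yandex/clickhouse-fasttest docker/test/fasttest
docker run \
    -e PULL_REQUEST_NUMBER=0 -e COMMIT_SHA='' \
    -v "$PWD/test_output:/test_output" \
    -v "$PWD/ccache:/ccache" \
    yandex/clickhouse-fasttest
```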
docker/test/fasttest/run.sh (new executable file, 97 lines)
@@ -0,0 +1,97 @@
#!/bin/bash

set -x -e

ls -la

git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt
cd ClickHouse
CLICKHOUSE_DIR=`pwd`

if [ "$PULL_REQUEST_NUMBER" != "0" ]; then
    if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
        git checkout FETCH_HEAD
        echo 'Cloned merge head'
    else
        git fetch
        git checkout $COMMIT_SHA
        echo 'Checked out to commit'
    fi
else
    if [ "$COMMIT_SHA" != "" ]; then
        git checkout $COMMIT_SHA
    fi
fi

SUBMODULES_TO_UPDATE="contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11"

git submodule update --init --recursive $SUBMODULES_TO_UPDATE | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt

export CMAKE_LIBS_CONFIG="-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1"

export CCACHE_DIR=/ccache
export CCACHE_BASEDIR=/ClickHouse
export CCACHE_NOHASHDIR=true
export CCACHE_COMPILERCHECK=content
export CCACHE_MAXSIZE=15G

ccache --show-stats ||:
ccache --zero-stats ||:

mkdir build
cd build
CLICKHOUSE_BUILD_DIR=`pwd`
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 $CMAKE_LIBS_CONFIG | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt

ccache --show-stats ||:

mkdir -p /etc/clickhouse-server
mkdir -p /etc/clickhouse-client
mkdir -p /etc/clickhouse-server/config.d
mkdir -p /etc/clickhouse-server/users.d
mkdir -p /var/log/clickhouse-server
cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/

mkdir -p /etc/clickhouse-server/dict_examples
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml

clickhouse-server --config /etc/clickhouse-server/config.xml --daemon

until clickhouse-client --query "SELECT 1"
do
    sleep 0.1
done

TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper"

clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt

mv /var/log/clickhouse-server/* /test_output
@@ -37,6 +37,8 @@ function download
    wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-10_debug_none_bundled_unsplitted_disable_False_binary/clickhouse"
    chmod +x clickhouse
+    ln -s ./clickhouse ./clickhouse-server
+    ln -s ./clickhouse ./clickhouse-client
}

function configure
@@ -45,7 +47,8 @@ function configure
    mkdir db ||:
    cp -av "$repo_dir"/programs/server/config* db
    cp -av "$repo_dir"/programs/server/user* db
-    cp -av "$repo_dir"/tests/config db/config.d
+    # TODO figure out which ones are needed
+    cp -av "$repo_dir"/tests/config/listen.xml db/config.d
    cp -av "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
}
@@ -54,31 +57,55 @@ function watchdog
    sleep 3600

    echo "Fuzzing run has timed out"
    ./clickhouse client --query "select elapsed, query from system.processes" ||:
-    killall -9 clickhouse clickhouse-server clickhouse-client ||:
+    killall clickhouse-client ||:
+    for x in {1..10}
+    do
+        if ! pgrep -f clickhouse-client
+        then
+            break
+        fi
+        sleep 1
+    done
+    killall -9 clickhouse-client ||:
}

function fuzz
{
-    ./clickhouse server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
+    ./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -10000 > server.log &
    server_pid=$!
    kill -0 $server_pid
-    while ! ./clickhouse client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
-    ./clickhouse client --query "select 1"
+    while ! ./clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
+    ./clickhouse-client --query "select 1"
    kill -0 $server_pid
    echo Server started

    fuzzer_exit_code=0
-    ./clickhouse client --query-fuzzer-runs=1000 \
+    ./clickhouse-client --query-fuzzer-runs=1000 \
        < <(for f in $(ls ch/tests/queries/0_stateless/*.sql | sort -R); do cat "$f"; echo ';'; done) \
-        > >(tail -100000 > fuzzer.log) \
+        > >(tail -10000 > fuzzer.log) \
        2>&1 \
        || fuzzer_exit_code=$?

    echo "Fuzzer exit code is $fuzzer_exit_code"
-    ./clickhouse client --query "select elapsed, query from system.processes" ||:
-    kill -9 $server_pid ||:
-    return $fuzzer_exit_code
+    ./clickhouse-client --query "select elapsed, query from system.processes" ||:
+    killall clickhouse-server ||:
+    for x in {1..10}
+    do
+        if ! pgrep -f clickhouse-server
+        then
+            break
+        fi
+        sleep 1
+    done
+    killall -9 clickhouse-server ||:
+
+    if [ "$fuzzer_exit_code" == "143" ]
+    then
+        # Killed by watchdog, meaning, no errors.
+        fuzzer_exit_code=0
+    fi
}

case "$stage" in
@@ -106,11 +133,19 @@ case "$stage" in
    time configure
    ;&
"fuzz")
+    # Start a watchdog that should kill the fuzzer on timeout.
+    # The shell won't kill the child sleep when we kill it, so we have to put it
+    # into a separate process group so that we can kill them all.
+    set -m
    watchdog &
    watchdog_pid=$!
+    set +m
+    # Check that the watchdog has started
+    kill -0 $watchdog_pid

    fuzzer_exit_code=0
    time fuzz || fuzzer_exit_code=$?
-    kill $watchdog_pid ||:
+    kill -- -$watchdog_pid ||:

    # Debug
    date
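The `set -m` / `kill -- -$watchdog_pid` pairing above is the standard shell idiom for killing a background job together with its children: with job control enabled, each background job gets its own process group, and signalling the negated PID targets the whole group. A minimal, self-contained sketch:

```bash
# Minimal illustration of the process-group kill used above.
set -m                      # each background job now gets its own process group
(sleep 3600; echo "never") &
group_pid=$!
set +m
kill -- "-$group_pid"       # negative PID: signal the whole group, inner sleep included
```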
@@ -118,6 +153,19 @@ case "$stage" in
    jobs
    pstree -aspgT

+    # Make files with status and description we'll show for this check on Github
+    if [ "$fuzzer_exit_code" == 0 ]
+    then
+        echo "OK" > description.txt
+        echo "success" > status.txt
+    else
+        echo "failure" > status.txt
+        if ! grep -a "Received signal \|Logical error" server.log > description.txt
+        then
+            echo "Fuzzer exit code $fuzzer_exit_code. See the logs" > description.txt
+        fi
+    fi
+
    exit $fuzzer_exit_code
    ;&
esac
@@ -1,3 +1,4 @@
+# docker build -t yandex/clickhouse-integration-helper .
# Helper docker container to run iptables without sudo

FROM alpine
@@ -11,6 +11,7 @@ services:
    environment:
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
+     MINIO_PROMETHEUS_AUTH_TYPE: public
    command: server --address :9001 --certs-dir /certs /data1-1
    depends_on:
      - proxy1
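Setting `MINIO_PROMETHEUS_AUTH_TYPE: public` lets the MinIO Prometheus metrics endpoint be read without a bearer token. Assuming the server address from the `command` line above, a scrape could be sanity-checked like this (illustrative):

```bash
# With public auth, MinIO metrics need no token (endpoint path per MinIO docs).
curl http://localhost:9001/minio/prometheus/metrics
```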
@@ -17,6 +17,7 @@ RUN apt-get update \
            libc6-dbg \
            moreutils \
            ncdu \
            numactl \
            p7zip-full \
            parallel \
            psmisc \
@@ -317,9 +317,11 @@ create view right_query_log as select *
    '$(cat "right-query-log.tsv.columns")');

create view query_logs as
-    select *, 0 version from left_query_log
+    select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+        query_duration_ms from left_query_log
    union all
-    select *, 1 version from right_query_log
+    select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+        query_duration_ms from right_query_log
    ;

-- This is a single source of truth on all metrics we have for query runs. The

@@ -498,7 +500,8 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
    left, right, diff, stat_threshold,
    if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
-    test, query_index, query_display_name
+    query_metric_stats.test test, query_metric_stats.query_index query_index,
+        query_display_name
    from query_metric_stats
    left join file('analyze/report-thresholds.tsv', TSV,
        'test text, report_threshold float') thresholds

@@ -666,7 +669,8 @@ create view query_display_names as select * from
create table unstable_query_runs engine File(TSVWithNamesAndTypes,
        'unstable-query-runs.$version.rep') as
-    select test, query_index, query_display_name, query_id
+    select query_runs.test test, query_runs.query_index query_index,
+        query_display_name, query_id
    from query_runs
    join queries_for_flamegraph on
        query_runs.test = queries_for_flamegraph.test
@@ -6,7 +6,6 @@ trap 'kill $(jobs -pr) ||:' EXIT
mkdir db0 ||:
mkdir left ||:
-mkdir right ||:

left_pr=$1
left_sha=$2

@@ -24,7 +23,7 @@ dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_e
function download
{
-    # Historically there were various path for the performance test package.
+    # Historically there were various paths for the performance test package.
    # Test all of them.
    for path in "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/"{,clickhouse_build_check/}"performance/performance.tgz"
    do

@@ -34,22 +33,13 @@ function download
        fi
    done

-    for path in "https://clickhouse-builds.s3.yandex.net/$right_pr/$right_sha/"{,clickhouse_build_check/}"performance/performance.tgz"
-    do
-        if curl --fail --head "$path"
-        then
-            right_path="$path"
-        fi
-    done
-
-    # might have the same version on left and right
-    if ! [ "$left_path" = "$right_path" ]
+    # Might have the same version on left and right (for testing).
+    if ! [ "$left_sha" = "$right_sha" ]
    then
        wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv &
        wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv &
    else
-        mkdir right ||:
-        wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv && cp -a left/* right &
+        mkdir left ||:
+        cp -a right/* left &
    fi

    for dataset_name in $datasets
@@ -1,38 +1,23 @@
#!/bin/bash
set -ex

-chown nobody workspace output
-chgrp nogroup workspace output
-chmod 777 workspace output
-
-cd workspace
-
# Fetch the repository to find and describe the compared revisions.
rm -rf ch ||:
time git clone --depth 50 --bare https://github.com/ClickHouse/ClickHouse ch
git -C ch fetch origin "$SHA_TO_TEST"

# Use the packaged repository to find the revision we will compare to.
function find_reference_sha
{
    # If not master, try to fetch pull/.../{head,merge}
    if [ "$PR_TO_TEST" != "0" ]
    then
        git -C ch fetch origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
    fi

    # Go back from the revision to be tested, trying to find the closest published
-    # testing release.
-    start_ref="$SHA_TO_TEST"~
-    # If we are testing a PR, and it merges with master successfully, we are
-    # building and testing not the nominal last SHA specified by pull/.../head
-    # and SHA_TO_TEST, but a revision that is merged with recent master, given
-    # by pull/.../merge ref.
-    # Master is the first parent of the pull/.../merge.
-    if git -C ch rev-parse "pull/$PR_TO_TEST/merge"
+    # testing release. The PR branch may be either pull/*/head which is the
+    # author's branch, or pull/*/merge, which is head merged with some master
+    # automatically by Github. We will use a merge base with master as a reference
+    # for testing (or some older commit). A caveat is that if we're testing the
+    # master, the merge base is the tested commit itself, so we have to step back
+    # once.
+    start_ref=$(git -C ch merge-base origin/master pr)
+    if [ "$PR_TO_TEST" == "0" ]
    then
-        start_ref="pull/$PR_TO_TEST/merge~"
+        start_ref=$start_ref~
    fi

+    # Loop back to find a commit that actually has a published perf test package.
    while :
    do
        # FIXME the original idea was to compare to a closest testing tag, which

@@ -48,10 +33,10 @@ function find_reference_sha
        # dereference the tag to get the commit it points to, hence the '~0' thing.
        REF_SHA=$(git -C ch rev-parse "$ref_tag~0")

-        # FIXME sometimes we have testing tags on commits without published builds --
-        # normally these are documentation commits. Loop to skip them.
-        # Historically there were various path for the performance test package.
-        # Test all of them.
+        # FIXME sometimes we have testing tags on commits without published builds.
+        # Normally these are documentation commits. Loop to skip them.
+        # Historically there were various paths for the performance test package,
+        # test all of them.
        unset found
        for path in "https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/"{,clickhouse_build_check/}"performance/performance.tgz"
        do

@@ -69,6 +54,24 @@ function find_reference_sha
    REF_PR=0
}

+chown nobody workspace output
+chgrp nogroup workspace output
+chmod 777 workspace output
+
+cd workspace
+
+# Download the package for the version we are going to test
+for path in "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/"{,clickhouse_build_check/}"performance/performance.tgz"
+do
+    if curl --fail --head "$path"
+    then
+        right_path="$path"
+    fi
+done
+
+mkdir right
+wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv
+
# Find reference revision if not specified explicitly
if [ "$REF_SHA" == "" ]; then find_reference_sha; fi
if [ "$REF_SHA" == "" ]; then echo Reference SHA is not specified ; exit 1 ; fi
@@ -55,18 +55,21 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
    ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
    ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;

-ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/;
+ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
+
+# Retain any pre-existing config and allow ClickHouse to load those if required
+ln -s --backup=simple --suffix=_original.xml \
+    /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/

service zookeeper start
@@ -17,7 +17,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
-ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/

@@ -33,6 +32,10 @@ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/

+# Retain any pre-existing config and allow ClickHouse to load it if required
+ln -s --backup=simple --suffix=_original.xml \
+    /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
+
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
    ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
fi
@@ -46,28 +46,31 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
    ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
    ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;

-ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
-    ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
+ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
+ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+
+# Retain any pre-existing config and allow ClickHouse to load it if required
+ln -s --backup=simple --suffix=_original.xml \
+    /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/

service zookeeper start
sleep 5
@@ -23,28 +23,7 @@ RUN apt-get update -y \
    brotli

COPY ./stress /stress
+COPY run.sh /

ENV DATASETS="hits visits"

-CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
-    dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
-    dpkg -i package_folder/clickhouse-server_*.deb; \
-    dpkg -i package_folder/clickhouse-client_*.deb; \
-    dpkg -i package_folder/clickhouse-test_*.deb; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-    ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-    echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment; \
-    echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
-    echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment; \
-    service clickhouse-server start && sleep 5 \
-    && /s3downloader --dataset-names $DATASETS \
-    && chmod 777 -R /var/lib/clickhouse \
-    && clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" \
-    && clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" \
-    && service clickhouse-server restart && sleep 5 \
-    && clickhouse-client --query "SHOW TABLES FROM datasets" \
-    && clickhouse-client --query "SHOW TABLES FROM test" \
-    && clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" \
-    && clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" \
-    && clickhouse-client --query "SHOW TABLES FROM test" \
-    && ./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"
+CMD ["/bin/bash", "/run.sh"]
docker/test/stress/run.sh (new executable file, 56 lines)
@@ -0,0 +1,56 @@
#!/bin/bash

set -x

dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
dpkg -i package_folder/clickhouse-test_*.deb

function wait_server()
{
    counter=0
    until clickhouse-client --query "SELECT 1"
    do
        if [ "$counter" -gt 120 ]
        then
            break
        fi
        sleep 0.5
        counter=$(($counter + 1))
    done
}

ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/

echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment
echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment

service clickhouse-server start

wait_server

/s3downloader --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
service clickhouse-server restart

wait_server

clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
clickhouse-client --query "SHOW TABLES FROM test"

./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"

service clickhouse-server restart

wait_server

clickhouse-client --query "SELECT 'Server successfully started'" > /test_output/alive_check.txt || echo 'Server failed to start' > /test_output/alive_check.txt
@@ -41,15 +41,6 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option):
    return pipes


-def check_clickhouse_alive(cmd):
-    try:
-        logging.info("Checking ClickHouse still alive")
-        check_call("{} --query \"select 'Still alive'\"".format(cmd), shell=True)
-        return True
-    except:
-        return False
-
-
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for running stresstest")

@@ -65,7 +56,6 @@ if __name__ == "__main__":
    args = parser.parse_args()
    func_pipes = []
-    perf_process = None
    try:
        perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder)
        func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests)

@@ -80,14 +70,4 @@ if __name__ == "__main__":
    logging.info("Finished %s from %s processes", len(retcodes), len(func_pipes))
    time.sleep(5)

-    if not check_clickhouse_alive(args.client_cmd):
-        raise Exception("Stress failed, results in logs")
-    else:
-        logging.info("Stress is ok")
-    except Exception as ex:
-        raise ex
-    finally:
-        if os.path.exists(args.server_log_folder):
-            logging.info("Copying server log files")
-            for log_file in os.listdir(args.server_log_folder):
-                shutil.copy(os.path.join(args.server_log_folder, log_file), os.path.join(args.output_folder, log_file))
+    logging.info("Stress test finished")
@@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo tzlocal
+RUN pip3 install urllib3 testflows==1.6.39 docker-compose docker dicttoxml kazoo tzlocal

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce
@@ -1,25 +0,0 @@
-{
-    "checkYo": false,
-    "excludeFiles": [],
-    "fileExtensions": [],
-    "format": "auto",
-    "ignoreTags": [
-        "code",
-        "kbd",
-        "object",
-        "samp",
-        "script",
-        "style",
-        "var"
-    ],
-    "maxRequests": 2,
-    "lang": "en,ru",
-    "report": ["console"],
-    "dictionary": [
-        "(C|c)lick(H|h)ouse",
-        "CatBoost",
-        "(Ш|ш)ард(ы|ов|а|у|е|ам|ирование|ированы|ах)?",
-        "логир(ование|уются|ования)?",
-        "конфиг(а|е|ом|у)"
-    ]
-}
docs/_description_templates/template-statement.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Statement name (for example, SHOW USER)

Brief description of what the statement does.

Syntax:

```sql
Syntax of the statement.
```

## Other necessary sections of the description (Optional)

Examples of descriptions with a complicated structure:

- https://clickhouse.tech/docs/en/sql-reference/statements/grant/
- https://clickhouse.tech/docs/en/sql-reference/statements/revoke/
- https://clickhouse.tech/docs/en/sql-reference/statements/select/join/

## See Also (Optional)

Links to related topics as a list.

- [link](#)
@@ -1,6 +1,6 @@
---
toc_priority: 71
-toc_title: Source Code
+toc_title: Source Code Browser
---

# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
@@ -1,6 +1,6 @@
---
toc_priority: 67
-toc_title: How to Build ClickHouse on Linux for AARCH64 (ARM64)
+toc_title: Build on Linux for AARCH64 (ARM64)
---

# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}

@@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.

-# Install Clang-8 {#install-clang-8}
+## Install Clang-8 {#install-clang-8}

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, in Ubuntu Bionic you can use the following commands:

@@ -20,7 +20,7 @@ sudo apt-get update
sudo apt-get install clang-8
```

-# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
+## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}

``` bash
cd ClickHouse

@@ -29,7 +29,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
```

-# Build ClickHouse {#build-clickhouse}
+## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
@@ -1,6 +1,6 @@
---
toc_priority: 66
-toc_title: How to Build ClickHouse on Linux for Mac OS X
+toc_title: Build on Linux for Mac OS X
---

# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}

@@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.

-# Install Clang-8 {#install-clang-8}
+## Install Clang-8 {#install-clang-8}

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example the commands for Bionic are like:

@@ -19,7 +19,7 @@ sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8
sudo apt-get install clang-8
```

-# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
+## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}

Let’s remember the path where we install `cctools` as ${CCTOOLS}

@@ -47,7 +47,7 @@ mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

-# Build ClickHouse {#build-clickhouse}
+## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
@@ -1,6 +1,6 @@
---
toc_priority: 65
-toc_title: How to Build ClickHouse on Mac OS X
+toc_title: Build on Mac OS X
---

# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

@@ -45,14 +45,12 @@ $ cd ..

## Caveats {#caveats}

-If you intend to run clickhouse-server, make sure to increase the system’s maxfiles variable.
+If you intend to run `clickhouse-server`, make sure to increase the system’s maxfiles variable.

!!! info "Note"
    You’ll need to use sudo.

-To do so, create the following file:
-
-/Library/LaunchDaemons/limit.maxfiles.plist:
+To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:

``` xml
<?xml version="1.0" encoding="UTF-8"?>
@@ -1,11 +1,9 @@
---
toc_priority: 64
-toc_title: How to Build ClickHouse on Linux
+toc_title: Build on Linux
---

-# How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development}
-
-The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
+# How to Build ClickHouse on Linux {#how-to-build-clickhouse-for-development}

Supported platforms:
@@ -13,7 +11,11 @@ Supported platforms:
- AArch64
- Power9 (experimental)

-## Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
+## Normal Build for Development on Ubuntu
+
+The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
+
+### Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}

``` bash
$ sudo apt-get install git cmake python ninja-build
@@ -21,18 +23,18 @@ $ sudo apt-get install git cmake python ninja-build

Or cmake3 instead of cmake on older systems.

-## Install GCC 9 {#install-gcc-9}
+### Install GCC 9 {#install-gcc-9}

There are several ways to do this.

-### Install from Repository {#install-from-repository}
+#### Install from Repository {#install-from-repository}

On Ubuntu 19.10 or newer:

    $ sudo apt-get update
    $ sudo apt-get install gcc-9 g++-9

-### Install from a PPA Package {#install-from-a-ppa-package}
+#### Install from a PPA Package {#install-from-a-ppa-package}

On older Ubuntu:
@ -43,18 +45,18 @@ $ sudo apt-get update
|
||||
$ sudo apt-get install gcc-9 g++-9
|
||||
```
|
||||
|
||||
### Install from Sources {#install-from-sources}
|
||||
#### Install from Sources {#install-from-sources}
|
||||
|
||||
See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
|
||||
|
||||
## Use GCC 9 for Builds {#use-gcc-9-for-builds}
|
||||
### Use GCC 9 for Builds {#use-gcc-9-for-builds}
|
||||
|
||||
``` bash
|
||||
$ export CC=gcc-9
|
||||
$ export CXX=g++-9
|
||||
```
|
||||
|
||||
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
|
||||
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
|
||||
|
||||
``` bash
|
||||
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
|
||||
@ -66,7 +68,7 @@ or
|
||||
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
|
||||
```
|
||||
|
||||
## Build ClickHouse {#build-clickhouse}
|
||||
### Build ClickHouse {#build-clickhouse}
|
||||
|
||||
``` bash
|
||||
$ cd ClickHouse
|
||||
@ -79,7 +81,7 @@ $ ninja
|
||||
To create an executable, run `ninja clickhouse`.
|
||||
This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.
|
||||
|
||||
# How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
|
||||
## How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
|
||||
|
||||
The build requires the following components:
|
||||
|
||||
@ -93,32 +95,58 @@ The build requires the following components:
|
||||
If all the components are installed, you may build in the same way as the steps above.
|
||||
|
||||
Example for Ubuntu Eoan:
|
||||
|
||||
``` bash
|
||||
sudo apt update
|
||||
sudo apt install git cmake ninja-build g++ python
|
||||
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
|
||||
mkdir build && cd build
|
||||
cmake ../ClickHouse
|
||||
ninja
|
||||
```
|
||||
|
||||
Example for OpenSUSE Tumbleweed:
|
||||
|
||||
``` bash
|
||||
sudo zypper install git cmake ninja gcc-c++ python lld
|
||||
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
|
||||
mkdir build && cd build
|
||||
cmake ../ClickHouse
|
||||
ninja
|
||||
```
|
||||
|
||||
Example for Fedora Rawhide:
|
||||
|
||||
``` bash
|
||||
sudo yum update
|
||||
yum --nogpg install git cmake make gcc-c++ python2
|
||||
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
|
||||
mkdir build && cd build
|
||||
cmake ../ClickHouse
|
||||
make -j $(nproc)
|
||||
```
|
||||
|
||||
# You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
|
||||
|
||||
## How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
|
||||
|
||||
### Install Git and Pbuilder {#install-git-and-pbuilder}
|
||||
|
||||
``` bash
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
|
||||
```
|
||||
|
||||
### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
|
||||
|
||||
``` bash
|
||||
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
|
||||
$ cd ClickHouse
|
||||
```
|
||||
|
||||
### Run Release Script {#run-release-script}
|
||||
|
||||
``` bash
|
||||
$ ./release
|
||||
```
|
||||
|
||||
## You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
|
||||
|
||||
ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.
|
||||
|
||||
@ -126,26 +154,4 @@ They are built for stable, prestable and testing releases as long as for every c
|
||||
|
||||
To find the freshest build from `master`, go to the [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green check mark or red cross near the commit, and click the “Details” link right after “ClickHouse Build Check”.
|
||||
|
||||
# How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
|
||||
|
||||
## Install Git and Pbuilder {#install-git-and-pbuilder}
|
||||
|
||||
``` bash
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
|
||||
```
|
||||
|
||||
## Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
|
||||
|
||||
``` bash
|
||||
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
|
||||
$ cd ClickHouse
|
||||
```
|
||||
|
||||
## Run Release Script {#run-release-script}
|
||||
|
||||
``` bash
|
||||
$ ./release
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/development/build/) <!--hide-->
|
||||
|
@ -35,6 +35,7 @@ toc_title: Third-Party Libraries Used
|
||||
| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
|
||||
| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
|
||||
| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
|
||||
| sentry-native | [MIT License](https://github.com/getsentry/sentry-native/blob/master/LICENSE) |
|
||||
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
|
||||
| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
|
||||
| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
toc_priority: 68
|
||||
toc_title: How to Write C++ Code
|
||||
toc_title: C++ Guide
|
||||
---
|
||||
|
||||
# How to Write C++ Code {#how-to-write-c-code}
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
toc_priority: 69
|
||||
toc_title: How to Run ClickHouse Tests
|
||||
toc_title: Testing
|
||||
---
|
||||
|
||||
# ClickHouse Testing {#clickhouse-testing}
|
||||
@ -25,12 +25,7 @@ Tests should use (create, drop, etc) only tables in `test` database that is assu
|
||||
|
||||
If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`.
|
||||
|
||||
Some tests are marked with `zookeeper`, `shard` or `long` in their names.
|
||||
`zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that
|
||||
requires server to listen `127.0.0.*`; `distributed` or `global` have the same
|
||||
meaning. `long` is for tests that run slightly longer that one second. You can
|
||||
disable these groups of tests using `--no-zookeeper`, `--no-shard` and
|
||||
`--no-long` options, respectively.
|
||||
Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that use ZooKeeper. `shard` is for tests that require the server to listen on `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer than one second. You can disable these groups of tests using the `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively.
|
||||
|
||||
## Known Bugs {#known-bugs}
|
||||
|
||||
@ -153,11 +148,11 @@ Motivation:
|
||||
|
||||
Normally we release and run all tests on a single variant of ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:
|
||||
|
||||
- build on FreeBSD;
|
||||
- build on Debian with libraries from system packages;
|
||||
- build with shared linking of libraries;
|
||||
- build on AArch64 platform;
|
||||
- build on PowerPc platform.
|
||||
- build on FreeBSD
|
||||
- build on Debian with libraries from system packages
|
||||
- build with shared linking of libraries
|
||||
- build on AArch64 platform
|
||||
- build on PowerPc platform
|
||||
|
||||
For example, build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.
|
||||
|
||||
@ -177,22 +172,22 @@ For production builds, gcc is used (it still generates slightly more efficient c
|
||||
|
||||
## Sanitizers {#sanitizers}
|
||||
|
||||
**Address sanitizer**.
|
||||
### Address sanitizer
|
||||
We run functional and integration tests under ASan on a per-commit basis.
|
||||
|
||||
**Valgrind (Memcheck)**.
|
||||
### Valgrind (Memcheck)
|
||||
We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).
|
||||
|
||||
**Undefined behaviour sanitizer.**
|
||||
### Undefined behaviour sanitizer
|
||||
We run functional and integration tests under UBSan on a per-commit basis.
|
||||
|
||||
**Thread sanitizer**.
|
||||
### Thread sanitizer
|
||||
We run functional tests under TSan on a per-commit basis. We still don’t run integration tests under TSan on a per-commit basis.
|
||||
|
||||
**Memory sanitizer**.
|
||||
### Memory sanitizer
|
||||
Currently we still don’t use MSan.
|
||||
|
||||
**Debug allocator.**
|
||||
### Debug allocator
|
||||
Debug version of `jemalloc` is used for debug build.
|
||||
|
||||
## Fuzzing {#fuzzing}
|
||||
@ -227,7 +222,7 @@ If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of t
|
||||
|
||||
## Code Style {#code-style}
|
||||
|
||||
Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
|
||||
Code style rules are described [here](style.md).
|
||||
|
||||
To check for some common style violations, you can use `utils/check-style` script.
|
||||
|
||||
|
@ -96,6 +96,7 @@ For a description of parameters, see the [CREATE query description](../../../sql
|
||||
- `write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don’t turn it off.
|
||||
- `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192.
|
||||
- `storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
|
||||
- `min_bytes_for_wide_part`, `min_rows_for_wide_part` — Minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one, both or none of these settings. See [Data Storage](#mergetree-data-storage).
|
||||
|
||||
**Example of Sections Setting**
|
||||
|
||||
@ -149,6 +150,10 @@ When data is inserted in a table, separate data parts are created and each of th
|
||||
|
||||
Data belonging to different partitions are separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.
|
||||
|
||||
Data parts can be stored in `Wide` or `Compact` format. In the `Wide` format, each column is stored in a separate file in the filesystem; in the `Compact` format, all columns are stored in one file. The `Compact` format can be used to increase the performance of small and frequent inserts.
|
||||
|
||||
The data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the table engine. If the number of bytes or rows in a data part is less than the corresponding setting's value, the part is stored in `Compact` format. Otherwise, it is stored in `Wide` format. If neither of these settings is set, data parts are stored in `Wide` format.
|
||||
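For illustration, a minimal sketch of a table whose small parts stay in `Compact` format; the table name, columns, and the 10 MiB threshold here are placeholders, not values from this document:

``` sql
CREATE TABLE events
(
    `id` UInt64,
    `ts` DateTime
)
ENGINE = MergeTree()
ORDER BY id
SETTINGS min_bytes_for_wide_part = 10485760; -- parts smaller than 10 MiB are stored in Compact format
```

The actual format of each part can be checked in the `part_type` column of the `system.parts` table.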
|
||||
Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn’t split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it’s in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.
|
||||
|
||||
The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
|
||||
|
@ -22,7 +22,7 @@ The Distributed engine accepts parameters:
|
||||
|
||||
See also:
|
||||
|
||||
- `insert_distributed_sync` setting
|
||||
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
|
||||
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
|
||||
|
||||
Example:
|
||||
|
@ -24,7 +24,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem
|
||||
|
||||
**Engine Parameters**
|
||||
|
||||
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
|
||||
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
|
||||
- `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
|
||||
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.
|
||||
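As a hedged sketch of how these parameters fit together (the table and column names are illustrative):

``` sql
CREATE TABLE id_val_join
(
    `id` UInt32,
    `val` UInt8
)
ENGINE = Join(ANY, LEFT, id); -- join_strictness = ANY, join_type = LEFT, key column = id
```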
|
||||
|
@ -16,7 +16,7 @@ One of the following batches of those t-shirts was supposed to be given away on
|
||||
So, what does it mean? Here are some ways to translate *“не тормозит”*:
|
||||
|
||||
- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*.
|
||||
- If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If you larger system lags, it’s not because it uses ClickHouse”*.
|
||||
- If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
|
||||
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*.
|
||||
|
||||
If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:
|
||||
|
@ -30,7 +30,7 @@ See [File](../../engines/table-engines/special/file.md) table engine.
|
||||
|
||||
## Using Command-Line Redirection {#using-command-line-redirection}
|
||||
|
||||
``` sql
|
||||
``` bash
|
||||
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
|
||||
```
|
||||
|
||||
|
@ -1,13 +1,14 @@
|
||||
---
|
||||
toc_folder_title: Example Datasets
|
||||
toc_priority: 12
|
||||
toc_priority: 15
|
||||
toc_title: Introduction
|
||||
---
|
||||
|
||||
# Example Datasets {#example-datasets}
|
||||
|
||||
This section describes how to obtain example datasets and import them into ClickHouse.
|
||||
For some datasets example queries are also available.
|
||||
This section describes how to obtain example datasets and import them into ClickHouse. For some datasets example queries are also available.
|
||||
|
||||
The list of documented datasets:
|
||||
|
||||
- [Anonymized Yandex.Metrica Dataset](../../getting-started/example-datasets/metrica.md)
|
||||
- [Star Schema Benchmark](../../getting-started/example-datasets/star-schema.md)
|
||||
|
@ -37,6 +37,7 @@ The queries are executed as a read-only user. It implies some limitations:
|
||||
- INSERT queries are not allowed
|
||||
|
||||
The following settings are also enforced:
|
||||
|
||||
- [max\_result\_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes)
|
||||
- [max\_result\_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows)
|
||||
- [result\_overflow\_mode=break](../operations/settings/query_complexity/#result-overflow-mode)
|
||||
|
@ -20,6 +20,7 @@ toc_title: Adopters
|
||||
| <a href="https://www.bloomberg.com/" class="favicon">Bloomberg</a> | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
|
||||
| <a href="https://bloxy.info" class="favicon">Bloxy</a> | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
|
||||
| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
|
||||
| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |
|
||||
| <a href="https://carto.com/" class="favicon">CARTO</a> | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
|
||||
| <a href="http://public.web.cern.ch/public/" class="favicon">CERN</a> | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
|
||||
| <a href="http://cisco.com/" class="favicon">Cisco</a> | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
|
||||
@ -44,6 +45,7 @@ toc_title: Adopters
|
||||
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
|
||||
| <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
|
||||
| <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
|
||||
| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
|
||||
| <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
|
||||
| <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
|
||||
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
|
||||
|
@ -8,73 +8,54 @@ toc_title: Testing Hardware
|
||||
With this instruction, you can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
|
||||
|
||||
1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master
|
||||
|
||||
2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link.
|
||||
|
||||
3. Copy the link to “clickhouse” binary for amd64 or aarch64.
|
||||
|
||||
3. Copy the link to the `clickhouse` binary for amd64 or aarch64.
|
||||
4. ssh to the server and download it with wget:
|
||||
|
||||
<!-- -->
|
||||
|
||||
```bash
|
||||
# For amd64:
|
||||
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
|
||||
# For aarch64:
|
||||
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
|
||||
# Then do:
|
||||
chmod a+x clickhouse
|
||||
|
||||
1. Download configs:
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
5. Download configs:
|
||||
```bash
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
|
||||
mkdir config.d
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
|
||||
|
||||
1. Download benchmark files:
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
6. Download benchmark files:
|
||||
```bash
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
|
||||
chmod a+x benchmark-new.sh
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
|
||||
|
||||
1. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
7. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
|
||||
```bash
|
||||
wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
|
||||
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
|
||||
mv hits_100m_obfuscated_v1/* .
|
||||
|
||||
1. Run the server:
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
8. Run the server:
|
||||
```bash
|
||||
./clickhouse server
|
||||
|
||||
1. Check the data: ssh to the server in another terminal
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
9. Check the data: ssh to the server in another terminal
|
||||
```bash
|
||||
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
|
||||
100000000
|
||||
|
||||
1. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `–-max_memory_usage 100000000000` parameter.
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
10. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `--max_memory_usage 100000000000` parameter.
|
||||
```bash
|
||||
mcedit benchmark-new.sh
|
||||
|
||||
1. Run the benchmark:
|
||||
|
||||
<!-- -->
|
||||
|
||||
```
|
||||
11. Run the benchmark:
|
||||
```bash
|
||||
./benchmark-new.sh hits_100m_obfuscated
|
||||
|
||||
1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
|
||||
```
|
||||
12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
|
||||
|
||||
All the results are published here: https://clickhouse.tech/benchmark/hardware/
|
||||
|
@ -397,7 +397,6 @@ The cache is shared for the server and memory is allocated as needed. The cache
|
||||
``` xml
|
||||
<mark_cache_size>5368709120</mark_cache_size>
|
||||
```
|
||||
|
||||
## max\_server\_memory\_usage {#max_server_memory_usage}
|
||||
|
||||
Limits total RAM usage by the ClickHouse server. You can specify it only for the default profile.
|
||||
@ -411,11 +410,37 @@ Default value: `0`.
|
||||
|
||||
**Additional Info**
|
||||
|
||||
On hosts with low RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio > 1`.
|
||||
The default `max_server_memory_usage` value is calculated as `memory_amount * max_server_memory_usage_to_ram_ratio`.
|
||||
|
||||
**See also**
|
||||
|
||||
- [max\_memory\_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage)
|
||||
- [max_server_memory_usage_to_ram_ratio](#max_server_memory_usage_to_ram_ratio)
|
||||
|
||||
## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}
|
||||
|
||||
Defines the fraction of the total physical RAM that is available to the ClickHouse server. If the server tries to utilize more, its memory consumption is cut down to the appropriate amount.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive double.
|
||||
- 0 — The ClickHouse server can use all available RAM.
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
**Usage**
|
||||
|
||||
On hosts with low RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio` to a value larger than 1.
|
||||
|
||||
**Example**
|
||||
|
||||
``` xml
|
||||
<max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [max_server_memory_usage](#max_server_memory_usage)
|
||||
|
||||
## max\_concurrent\_queries {#max-concurrent-queries}
|
||||
|
||||
|
@ -471,7 +471,7 @@ Default value: 0.
|
||||
|
||||
See also:
|
||||
|
||||
- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)
|
||||
- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings)
|
||||
|
||||
## temporary\_files\_codec {#temporary_files_codec}
|
||||
|
||||
@ -585,6 +585,31 @@ Possible values:
|
||||
|
||||
Default value: 0.
|
||||
|
||||
## network_compression_method {#network_compression_method}
|
||||
|
||||
Sets the method of data compression that is used for communication between servers and between the server and [clickhouse-client](../../interfaces/cli.md).
|
||||
|
||||
Possible values:
|
||||
|
||||
- `LZ4` — sets LZ4 compression method.
|
||||
- `ZSTD` — sets ZSTD compression method.
|
||||
|
||||
Default value: `LZ4`.
|
||||
|
||||
**See Also**
|
||||
|
||||
- [network_zstd_compression_level](#network_zstd_compression_level)
|
||||
|
||||
## network_zstd_compression_level {#network_zstd_compression_level}
|
||||
|
||||
Adjusts the level of ZSTD compression. Used only when [network_compression_method](#network_compression_method) is set to `ZSTD`.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer from 1 to 15.
|
||||
|
||||
Default value: `1`.
|
||||
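A minimal sketch combining this setting with [network_compression_method](#network_compression_method) above; the level value is an arbitrary example, and the right trade-off between network traffic and CPU load depends on your environment:

``` sql
SET network_compression_method = 'ZSTD';
SET network_zstd_compression_level = 5; -- must stay within the 1..15 range
```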
|
||||
## log\_queries {#settings-log-queries}
|
||||
|
||||
Setting up query logging.
|
||||
@ -783,6 +808,17 @@ If unsuccessful, several attempts are made to connect to various replicas.
|
||||
|
||||
Default value: 50.
|
||||
|
||||
## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}
|
||||
|
||||
The wait time in milliseconds for a connection when the connection pool is full.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Infinite timeout.
|
||||
|
||||
Default value: 0.
|
||||
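For example, to fail fast rather than wait indefinitely when the pool is exhausted (the 2000 ms value is illustrative):

``` sql
-- Wait at most 2 seconds for a free connection, then throw an exception
SET connection_pool_max_wait_ms = 2000;
```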
|
||||
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
|
||||
|
||||
The maximum number of connection attempts with each replica for the Distributed table engine.
|
||||
@ -794,6 +830,21 @@ Default value: 3.
|
||||
Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
|
||||
For more information, see the section “Extreme values”.
|
||||
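For example (a sketch using only the built-in `numbers` table function), enabling the setting makes the result include extra rows with the per-column minimums and maximums:

``` sql
SELECT number FROM numbers(10) SETTINGS extremes = 1;
```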
|
||||
## kafka\_max\_wait\_ms {#kafka-max-wait-ms}
|
||||
|
||||
The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retry.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Infinite timeout.
|
||||
|
||||
Default value: 5000.
|
||||
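For example, to wait up to 10 seconds before retrying a read from a Kafka-backed table (the value is an arbitrary illustration):

``` sql
SET kafka_max_wait_ms = 10000;
```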
|
||||
See also:
|
||||
|
||||
- [Apache Kafka](https://kafka.apache.org/)
|
||||
|
||||
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
|
||||
|
||||
Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
|
||||
@ -812,6 +863,17 @@ If a query from the same user with the same ‘query\_id’ already exists at th
|
||||
|
||||
Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn’t finished yet, it should be cancelled.
|
||||
|
||||
## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}
|
||||
|
||||
The wait time for a running query with the same `query_id` to finish, when the [replace_running_query](#replace-running-query) setting is active.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Throwing an exception that does not allow a new query to run if the server is already executing a query with the same `query_id`.
|
||||
|
||||
Default value: 5000.
|
||||
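A sketch of the intended usage; both statements assume a session where queries are sent with an explicit `query_id`:

``` sql
SET replace_running_query = 1;                 -- allow a new query to cancel the old one with the same query_id
SET replace_running_query_max_wait_ms = 10000; -- wait up to 10 seconds for the old query to stop
```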
|
||||
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
|
||||
|
||||
Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
|
||||
@ -1397,6 +1459,23 @@ Possible values:
|
||||
|
||||
Default value: 16.
|
||||
|
||||
## insert_distributed_sync {#insert_distributed_sync}
|
||||
|
||||
Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table.
|
||||
|
||||
By default, when inserting data into a `Distributed` table, the ClickHouse server sends data to cluster nodes in asynchronous mode. When `insert_distributed_sync=1`, the data is processed synchronously, and the `INSERT` operation succeeds only after all the data is saved on all shards (at least one replica for each shard if `internal_replication` is true).
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Data is inserted in asynchronous mode.
|
||||
- 1 — Data is inserted in synchronous mode.
|
||||
|
||||
Default value: `0`.
|
||||
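A hedged sketch; `distributed_table` is a placeholder for a table with the `Distributed` engine:

``` sql
SET insert_distributed_sync = 1;
INSERT INTO distributed_table VALUES (1, 'a'); -- returns only after the data is saved on all shards
```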
|
||||
**See Also**
|
||||
|
||||
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md#distributed)
|
||||
- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed)
|
||||
## background\_buffer\_flush\_schedule\_pool\_size {#background_buffer_flush_schedule_pool_size}
|
||||
|
||||
Sets the number of threads performing background flush in [Buffer](../../engines/table-engines/special/buffer.md)-engine tables. This setting is applied at ClickHouse server start and can’t be changed in a user session.
|
||||
@ -1454,6 +1533,17 @@ Possible values:
|
||||
|
||||
Default value: 16.
|
||||
|
||||
## validate\_polygons {#validate_polygons}
|
||||
|
||||
Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Throwing an exception is disabled. `pointInPolygon` accepts invalid polygons and returns possibly incorrect results for them.
|
||||
- 1 — Throwing an exception is enabled.
|
||||
|
||||
Default value: 1.
|
||||
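For example, a self-intersecting “bowtie” polygon throws an exception by default; disabling the setting makes `pointInPolygon` accept it, at the risk of an incorrect result (the coordinates are illustrative):

``` sql
SET validate_polygons = 0;
SELECT pointInPolygon((0.5, 0.5), [(0., 0.), (1., 1.), (1., 0.), (0., 1.)]);
```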
|
||||
## transform\_null\_in {#transform_null_in}
|
||||
|
||||
Enables equality of [NULL](../../sql-reference/syntax.md#null-literal) values for [IN](../../sql-reference/operators/in.md) operator.
|
||||
|
@ -1,3 +1,36 @@
|
||||
## system.asynchronous\_metric\_log {#system-tables-async-log}
|
||||
|
||||
Contains the historical values for `system.asynchronous_log` (see [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))
|
||||
Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. This feature is enabled by default.
|
||||
|
||||
Columns:
|
||||
|
||||
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
|
||||
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.asynchronous_metric_log LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬────value─┐
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty │ 4214 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained │ 17657856 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped │ 71471104 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident │ 61538304 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata │ 6199264 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated │ 38074336 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch │ 2 │
|
||||
└────────────┴─────────────────────┴──────────────────────────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [system.asynchronous\_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background.
|
||||
- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.asynchronous\_metrics {#system_tables-asynchronous_metrics}
|
||||
# system.asynchronous_metrics {#system_tables-asynchronous_metrics}
|
||||
|
||||
Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
|
||||
|
||||
|
10
docs/en/operations/system-tables/current-roles.md
Normal file
@ -0,0 +1,10 @@
|
||||
# system.current_roles {#system_tables-current_roles}
|
||||
Contains the active roles of the current user. `SET ROLE` changes the contents of this table.
|
||||
|
||||
Columns:
|
||||
|
||||
- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
|
||||
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
|
||||
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
|
||||
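For example, a quick way to inspect the active roles and see which of them are defaults (the output depends on the grants in your deployment):

``` sql
SELECT role_name, is_default FROM system.current_roles;
```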
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
|
11
docs/en/operations/system-tables/enabled-roles.md
Normal file
@ -0,0 +1,11 @@
|
||||
# system.enabled_roles {#system_tables-enabled_roles}
|
||||
Contains all roles that are active at the moment, including the current role of the current user and the roles granted to the current role.
|
||||
|
||||
Columns:
|
||||
|
||||
- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
|
||||
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a role with `ADMIN OPTION` privilege.
|
||||
- `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user.
|
||||
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) <!--hide-->
|
39
docs/en/operations/system-tables/licenses.md
Normal file
@ -0,0 +1,39 @@
|
||||
# system.licenses {#system-tables_system.licenses}
|
||||
|
||||
Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources.
|
||||
|
||||
Columns:
|
||||
|
||||
- `library_name` ([String](../../sql-reference/data-types/string.md)) — Name of the library the license is connected with.
|
||||
- `license_type` ([String](../../sql-reference/data-types/string.md)) — License type — e.g. Apache, MIT.
|
||||
- `license_path` ([String](../../sql-reference/data-types/string.md)) — Path to the file with the license text.
|
||||
- `license_text` ([String](../../sql-reference/data-types/string.md)) — License text.
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─library_name───────┬─license_type─┬─license_path────────────────────────┐
|
||||
│ FastMemcpy │ MIT │ /contrib/FastMemcpy/LICENSE │
|
||||
│ arrow │ Apache │ /contrib/arrow/LICENSE.txt │
|
||||
│ avro │ Apache │ /contrib/avro/LICENSE.txt │
|
||||
│ aws-c-common │ Apache │ /contrib/aws-c-common/LICENSE │
|
||||
│ aws-c-event-stream │ Apache │ /contrib/aws-c-event-stream/LICENSE │
|
||||
│ aws-checksums │ Apache │ /contrib/aws-checksums/LICENSE │
|
||||
│ aws │ Apache │ /contrib/aws/LICENSE.txt │
|
||||
│ base64 │ BSD 2-clause │ /contrib/base64/LICENSE │
|
||||
│ boost │ Boost │ /contrib/boost/LICENSE_1_0.txt │
|
||||
│ brotli │ MIT │ /contrib/brotli/LICENSE │
|
||||
│ capnproto │ MIT │ /contrib/capnproto/LICENSE │
|
||||
│ cassandra │ Apache │ /contrib/cassandra/LICENSE.txt │
|
||||
│ cctz │ Apache │ /contrib/cctz/LICENSE.txt │
|
||||
│ cityhash102 │ MIT │ /contrib/cityhash102/COPYING │
|
||||
│ cppkafka │ BSD 2-clause │ /contrib/cppkafka/LICENSE │
|
||||
└────────────────────┴──────────────┴─────────────────────────────────────┘
|
||||
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) <!--hide-->
|
@ -49,7 +49,7 @@ CurrentMetric_ReplicatedChecks: 0
|
||||
|
||||
**See also**
|
||||
|
||||
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
|
||||
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
|
||||
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md) — Contains periodically calculated metrics.
|
||||
- [system.events](../../operations/system-tables/events.md) — Contains a number of events that occurred.
|
||||
- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
|
||||
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
||||
|
@ -1,25 +1,46 @@
|
||||
# system.mutations {#system_tables-mutations}
|
||||
|
||||
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
|
||||
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
|
||||
|
||||
**database**, **table** - The name of the database and table to which the mutation was applied.
|
||||
Columns:
|
||||
|
||||
**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
|
||||
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied.
|
||||
|
||||
**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
|
||||
- `table` ([String](../../sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied.
|
||||
|
||||
**create\_time** - When this mutation command was submitted for execution.
|
||||
- `mutation_id` ([String](../../sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For non-replicated tables the IDs correspond to file names in the data directory of the table.
|
||||
|
||||
**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
|
||||
- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
|
||||
|
||||
**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.
|
||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
|
||||
|
||||
**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
|
||||
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
|
||||
|
||||
If there were problems with mutating some parts, the following columns contain additional information:
|
||||
- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
|
||||
|
||||
**latest\_failed\_part** - The name of the most recent part that could not be mutated.
|
||||
In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
|
||||
|
||||
**latest\_fail\_time** - The time of the most recent part mutation failure.
|
||||
- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
|
||||
|
||||
**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
|
||||
- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
|
||||
|
||||
- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
|
||||
- `1` if the mutation is completed,
|
||||
- `0` if the mutation is still in process.
|
||||
|
||||
!!! info "Note"
|
||||
    Even if `parts_to_do = 0`, it is possible that a mutation of a replicated table is not completed yet because of a long-running `INSERT` query that will create a new data part that needs to be mutated.
|
||||
|
||||
If there were problems with mutating some data parts, the following columns contain additional information:
|
||||
|
||||
- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.
|
||||
|
||||
- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
|
||||
|
||||
- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.
|
||||
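A common use of these columns is checking for unfinished or failing mutations; a minimal sketch:

``` sql
SELECT database, table, mutation_id, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```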
|
||||
**See Also**
|
||||
|
||||
- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
|
||||
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
|
||||
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
|
@ -6,75 +6,151 @@ Each row describes one data part.
|
||||
|
||||
Columns:
|
||||
|
||||
- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.
|
||||
- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.
|
||||
|
||||
Formats:
|
||||
|
||||
- `YYYYMM` for automatic partitioning by month.
|
||||
- `any_string` when partitioning manually.
|
||||
|
||||
- `name` (`String`) – Name of the data part.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) – Name of the data part.
|
||||
|
||||
- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging.
|
||||
- `part_type` ([String](../../sql-reference/data-types/string.md)) — The data part storing format.
|
||||
|
||||
- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity).
|
||||
Possible Values:
|
||||
|
||||
- `rows` (`UInt64`) – The number of rows.
|
||||
- `Wide` — Each column is stored in a separate file in a filesystem.
|
||||
- `Compact` — All columns are stored in one file in a filesystem.
|
||||
|
||||
- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes.
|
||||
Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.
|
||||
|
||||
- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging.
|
||||
|
||||
- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity).
|
||||
|
||||
- `marks_bytes` (`UInt64`) – The size of the file with marks.
|
||||
- `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The number of rows.
|
||||
|
||||
- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.\|
|
||||
- `bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of all the data part files in bytes.
|
||||
|
||||
- `remove_time` (`DateTime`) – The time when the data part became inactive.
|
||||
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
|
||||
- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
|
||||
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
|
||||
- `min_date` (`Date`) – The minimum value of the date key in the data part.
|
||||
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.
|
||||
|
||||
- `max_date` (`Date`) – The maximum value of the date key in the data part.
|
||||
- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
|
||||
|
||||
- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part.
|
||||
- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The time when the data part became inactive.
|
||||
|
||||
- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part.
|
||||
- `refcount` ([UInt32](../../sql-reference/data-types/int-uint.md)) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
|
||||
|
||||
- `partition_id` (`String`) – ID of the partition.
|
||||
- `min_date` ([Date](../../sql-reference/data-types/date.md)) – The minimum value of the date key in the data part.
|
||||
|
||||
- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging.
|
||||
- `max_date` ([Date](../../sql-reference/data-types/date.md)) – The maximum value of the date key in the data part.
|
||||
|
||||
- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging.
|
||||
- `min_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The minimum value of the date and time key in the data part.
|
||||
|
||||
- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
|
||||
- `max_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The maximum value of the date and time key in the data part.
|
||||
|
||||
- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
|
||||
- `partition_id` ([String](../../sql-reference/data-types/string.md)) – ID of the partition.
|
||||
|
||||
- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values.
|
||||
- `min_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The minimum number of data parts that make up the current part after merging.
|
||||
|
||||
- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values.
|
||||
- `max_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The maximum number of data parts that make up the current part after merging.
|
||||
|
||||
- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn’t exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
|
||||
- `level` ([UInt32](../../sql-reference/data-types/int-uint.md)) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
- `database` (`String`) – Name of the database.
- `data_version` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
- `table` (`String`) – Name of the table.
- `primary_key_bytes_in_memory` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) used by primary key values.
- `engine` (`String`) – Name of the table engine without parameters.
- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) reserved for primary key values.
- `path` (`String`) – Absolute path to the folder with data part files.
- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1 — the backup exists, 0 — the backup doesn’t exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition).
- `disk` (`String`) – Name of a disk that stores the data part.
- `database` ([String](../../sql-reference/data-types/string.md)) – Name of the database.
- `hash_of_all_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
- `table` ([String](../../sql-reference/data-types/string.md)) – Name of the table.
- `hash_of_uncompressed_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file, etc.).
- `engine` ([String](../../sql-reference/data-types/string.md)) – Name of the table engine without parameters.
- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
- `path` ([String](../../sql-reference/data-types/string.md)) – Absolute path to the folder with data part files.
- `bytes` (`UInt64`) – Alias for `bytes_on_disk`.
- `disk` ([String](../../sql-reference/data-types/string.md)) – Name of a disk that stores the data part.
- `marks_size` (`UInt64`) – Alias for `marks_bytes`.
- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file, etc.).
- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for the [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for the [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).

    !!! note "Warning"
        The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields.

- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Alias for `bytes_on_disk`.
- `marks_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Alias for `marks_bytes`.
**Example**

``` sql
SELECT * FROM system.parts LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
partition: tuple()
name: all_1_4_1_6
part_type: Wide
active: 1
marks: 2
rows: 6
bytes_on_disk: 310
data_compressed_bytes: 157
data_uncompressed_bytes: 91
marks_bytes: 144
modification_time: 2020-06-18 13:01:49
remove_time: 0000-00-00 00:00:00
refcount: 1
min_date: 0000-00-00
max_date: 0000-00-00
min_time: 0000-00-00 00:00:00
max_time: 0000-00-00 00:00:00
partition_id: all
min_block_number: 1
max_block_number: 4
level: 1
data_version: 6
primary_key_bytes_in_memory: 8
primary_key_bytes_in_memory_allocated: 64
is_frozen: 0
database: default
table: months
engine: MergeTree
disk_name: default
path: /var/lib/clickhouse/data/default/months/all_1_4_1_6/
hash_of_all_files: 2d0657a16d9430824d35e327fcbd87bf
hash_of_uncompressed_files: 84950cc30ba867c77a408ae21332ba29
uncompressed_hash_of_compressed_files: 1ad78f1c6843bbfb99a2c931abe7df7d
delete_ttl_info_min: 0000-00-00 00:00:00
delete_ttl_info_max: 0000-00-00 00:00:00
move_ttl_info.expression: []
move_ttl_info.min: []
move_ttl_info.max: []
```

**See Also**

- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
docs/en/operations/system-tables/role-grants.md
@ -0,0 +1,16 @@
# system.role_grants {#system_tables-role_grants}

Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`.

Columns:

- `user_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — User name.
- `role_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Role name.
- `granted_role_name` ([String](../../sql-reference/data-types/string.md)) — Name of the role granted to the `role_name` role. To grant one role to another, use `GRANT role1 TO role2`.
- `granted_role_is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a default role. Possible values:
    - 1 — `granted_role` is a default role.
    - 0 — `granted_role` is not a default role.
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a role with the [ADMIN OPTION](../../sql-reference/statements/grant.md#admin-option-privilege) privilege. Possible values:
    - 1 — The role has the `ADMIN OPTION` privilege.
    - 0 — The role does not have the `ADMIN OPTION` privilege.
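For example, to see which roles a particular user has been granted, you can filter this table (a minimal sketch; the `alice` user name is illustrative):

``` sql
SELECT granted_role_name, granted_role_is_default, with_admin_option
FROM system.role_grants
WHERE user_name = 'alice'
```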
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) <!--hide-->
docs/en/operations/system-tables/roles.md
@ -0,0 +1,10 @@
# system.roles {#system_tables-roles}

Contains information about configured [roles](../../operations/access-rights.md#role-management).

Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — Role name.
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Role ID.
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of roles. Configured in the `access_control_path` parameter.
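For example, a quick way to list the configured roles (a minimal sketch):

``` sql
SELECT name, id, storage FROM system.roles
```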
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
@ -16,12 +16,15 @@ By default `clickhouse-local` does not have access to data on the same host, but

!!! warning "Warning"
    It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.

By default, a unique temporary data directory is created for temporary data. To override this behavior, specify the data directory explicitly with the `-- --path` option.

## Usage {#usage}

Basic usage:

``` bash
$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" \
    --query "query"
```

Arguments:
@ -40,10 +43,12 @@ Arguments:

There are also arguments for each ClickHouse configuration variable, which are more commonly used instead of `--config-file`.

## Examples {#examples}

``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
$ echo -e "1,2\n3,4" | clickhouse-local --structure "a Int64, b Int64" \
    --input-format "CSV" --query "SELECT * FROM table"
Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
1 2
3 4
```
@ -52,16 +57,37 @@ Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.

The previous example is the same as:

``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
$ echo -e "1,2\n3,4" | clickhouse-local --query "
    CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin);
    SELECT a, b FROM table;
    DROP TABLE table"
Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
1 2
3 4
```

You don't have to use `stdin` or the `--file` argument; you can open any number of files using the [`file` table function](../../sql-reference/table-functions/file.md):

``` bash
$ echo 1 | tee 1.tsv
1

$ echo 2 | tee 2.tsv
2

$ clickhouse-local --query "
    select * from file('1.tsv', TSV, 'a int') t1
    cross join file('2.tsv', TSV, 'b int') t2"
1 2
```

Now let’s output memory usage for each Unix user:

``` bash
$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' \
    | clickhouse-local --structure "user String, mem Float64" \
        --query "SELECT user, round(sum(mem), 2) as memTotal
                 FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
```
@ -54,8 +54,6 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con

When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):

- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
- Dictionaries from other sources are updated every time by default.
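As an illustrative sketch (the dictionary name, source path, and layout below are hypothetical), the update period that this logic acts on is declared with the `LIFETIME` clause:

``` sql
CREATE DICTIONARY products (id UInt64, name String) -- hypothetical dictionary
PRIMARY KEY id
SOURCE(FILE(path '/var/lib/clickhouse/user_files/products.csv' format 'CSV'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360) -- reloaded at a random moment within [300, 360] seconds
```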
@ -503,3 +503,34 @@ Supported modifiers for Format:

| %% | a % sign | % |

[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->

## FROM_UNIXTIME

When there is only a single argument of integer type, it acts in the same way as `toDateTime` and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.

For example:

```sql
SELECT FROM_UNIXTIME(423543535)
```

```text
┌─FROM_UNIXTIME(423543535)─┐
│      1983-06-04 10:58:55 │
└──────────────────────────┘
```
When there are two arguments, the first of integer or DateTime type and the second a constant format string, it acts in the same way as `formatDateTime` and returns the `String` type.

For example:

```sql
SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
```

```text
┌─DateTime────────────┐
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```
@ -1350,4 +1350,80 @@ len: 30

- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)

## randomFixedString {#randomfixedstring}

Generates a binary string of the specified length filled with random bytes (including zero bytes).

**Syntax**

``` sql
randomFixedString(length);
```

**Parameters**

- `length` — String length in bytes. [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value(s)**

- String filled with random bytes.

Type: [FixedString](../../sql-reference/data-types/fixedstring.md).

**Example**

Query:

```sql
SELECT randomFixedString(13) as rnd, toTypeName(rnd)
```

Result:

```text
┌─rnd──────┬─toTypeName(randomFixedString(13))─┐
│ j▒h㋖HɨZ'▒ │ FixedString(13)                 │
└──────────┴───────────────────────────────────┘
```

## randomStringUTF8 {#randomstringutf8}

Generates a random string of a specified length. The resulting string contains valid UTF-8 code points. The code points may lie outside the range of assigned Unicode.

**Syntax**

``` sql
randomStringUTF8(length);
```

**Parameters**

- `length` — Required length of the resulting string in code points. [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value(s)**

- UTF-8 random string.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

Query:

```sql
SELECT randomStringUTF8(13)
```

Result:

```text
┌─randomStringUTF8(13)─┐
│ 𘤗д兠庇              │
└──────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) <!--hide-->
@ -111,4 +111,43 @@ SELECT alphaTokens('abca1abc')
└─────────────────────────┘
```

## extractAllGroups(text, regexp) {#extractallgroups}

Extracts all groups from non-overlapping substrings matched by a regular expression.

**Syntax**

``` sql
extractAllGroups(text, regexp)
```

**Parameters**

- `text` — [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `regexp` — Regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).

**Returned values**

- If the function finds at least one matching group, it returns an `Array(Array(String))` column, clustered by group_id (1 to N, where N is the number of capturing groups in `regexp`).

- If there is no matching group, it returns an empty array.

Type: [Array](../data-types/array.md).

**Example**

Query:

``` sql
SELECT extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)');
```

Result:

``` text
┌─extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)')─┐
│ [['abc','123'],['8','"hkl"']]                                         │
└───────────────────────────────────────────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) <!--hide-->
@ -1,5 +1,6 @@
---
toc_priority: 42
toc_title: ATTACH
---

# ATTACH Statement {#attach}

@ -1,5 +1,6 @@
---
toc_priority: 43
toc_title: CHECK
---

# CHECK TABLE Statement {#check-table}
@ -7,14 +7,14 @@ toc_title: VIEW

Creates a new view. There are two types of views: normal and materialized.

## Normal {#normal}

Syntax:

``` sql
CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ...
```

## Normal {#normal}

Normal views don’t store any data; they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.

As an example, assume you’ve created a view:
@ -37,6 +37,11 @@ SELECT a, b, c FROM (SELECT ...)

## Materialized {#materialized}

``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
```

Materialized views store data transformed by the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query.

When creating a materialized view without `TO [db].[table]`, you must specify `ENGINE` – the table engine for storing data.
@ -1,5 +1,6 @@
---
toc_priority: 44
toc_title: DESCRIBE
---

# DESCRIBE TABLE Statement {#misc-describe-table}

@ -1,5 +1,6 @@
---
toc_priority: 45
toc_title: DETACH
---

# DETACH Statement {#detach}
@ -11,6 +12,5 @@ DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again.
Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them).

There is no `DETACH DATABASE` query.
@ -1,5 +1,6 @@
---
toc_priority: 46
toc_title: DROP
---

# DROP Statements {#drop}

@ -77,3 +78,11 @@
Deletes a settings profile.

The deleted settings profile is revoked from all the entities where it was assigned.

## DROP VIEW {#drop-view}

``` sql
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

Deletes a view. Views can be deleted by a `DROP TABLE` command as well, but `DROP VIEW` checks that `[db.]name` is a view.
@ -1,5 +1,6 @@
---
toc_priority: 47
toc_title: EXISTS
---

# EXISTS Statement {#exists-statement}

@ -1,8 +1,9 @@
---
toc_priority: 48
toc_title: KILL
---

## KILL Statements {#kill-statements}
# KILL Statements {#kill-statements}

There are two kinds of kill statements: to kill a query and to kill a mutation.
@ -1,5 +1,6 @@
---
toc_priority: 49
toc_title: OPTIMIZE
---

# OPTIMIZE Statement {#misc_operations-optimize}

@ -1,5 +1,6 @@
---
toc_priority: 50
toc_title: RENAME
---

# RENAME Statement {#misc_operations-rename}
@ -11,7 +11,7 @@ Syntax:

``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```

@ -33,17 +33,13 @@ Additional join types available in ClickHouse:

- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
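For illustration, a minimal sketch of a semi join (the `visits` and `users` tables are hypothetical): `LEFT SEMI JOIN` keeps each left-side row that has at least one match on the right, without multiplying rows.

``` sql
SELECT visits.user_id, visits.url
FROM visits
LEFT SEMI JOIN users ON visits.user_id = users.id
```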
## Strictness {#select-join-strictness}

Modifies how matching by “join keys” is performed.

- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Setting {#join-settings}

!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
    The default join type can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

The behavior of the ClickHouse server for `ANY JOIN` operations also depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
@ -1,5 +1,6 @@
---
toc_priority: 52
toc_title: SET ROLE
---

# SET ROLE Statement {#set-role-statement}

@ -1,5 +1,6 @@
---
toc_priority: 51
toc_title: SET
---

# SET Statement {#query-set}
@ -5,6 +5,8 @@ toc_title: SYSTEM

# SYSTEM Statements {#query-language-system}

The list of available `SYSTEM` statements:

- [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries)
- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
@ -36,7 +38,7 @@ toc_title: SYSTEM
- [RESTART REPLICA](#query_language-system-restart-replica)
- [RESTART REPLICAS](#query_language-system-restart-replicas)

## RELOAD EMBEDDED DICTIONARIES\] {#query_language-system-reload-emdedded-dictionaries}
## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries}

Reload all [Internal dictionaries](../../sql-reference/dictionaries/internal-dicts.md).
By default, internal dictionaries are disabled.
@ -48,7 +50,7 @@ Reloads all dictionaries that have been successfully loaded before.
By default, dictionaries are loaded lazily (see [dictionaries\_lazy\_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED).
Always returns `Ok.` regardless of the result of the dictionary update.

## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary}
## RELOAD DICTIONARY {#query_language-system-reload-dictionary}

Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT\_LOADED / FAILED).
Always returns `Ok.` regardless of the result of updating the dictionary.
@ -115,7 +117,7 @@ Aborts ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`)

## Managing Distributed Tables {#query-language-system-distributed}

ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync) setting.
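For illustration, queue processing can be paused, flushed, and resumed like this (a sketch; the `db.distributed_table` name is hypothetical):

``` sql
SYSTEM STOP DISTRIBUTED SENDS db.distributed_table;
-- new INSERTs now only accumulate in the local queue
SYSTEM FLUSH DISTRIBUTED db.distributed_table;   -- force the queued data to be sent
SYSTEM START DISTRIBUTED SENDS db.distributed_table;
```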

### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
@ -1,5 +1,6 @@
---
toc_priority: 53
toc_title: TRUNCATE
---

# TRUNCATE Statement {#truncate-statement}

@ -1,5 +1,6 @@
---
toc_priority: 54
toc_title: USE
---

# USE Statement {#use}
@ -23,7 +23,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statemen

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.

@ -12,7 +12,7 @@ Syntax:

``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```

@ -34,14 +34,10 @@ Additional join types available in ClickHouse:

- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Strictness {#select-join-strictness}

Modifies how matching by “join keys” is performed.

- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Setting {#join-settings}

!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.

@ -12,7 +12,7 @@ machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd

``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```

@ -34,15 +34,12 @@ FROM <left_table>

- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Strictness {#select-join-strictness}
## Setting {#join-settings}

Modifies how matching by “join keys” is performed.

- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
@ -23,7 +23,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/stat

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.

@ -12,7 +12,7 @@ Syntax:

``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```

@ -34,14 +34,10 @@ Additional join types available in ClickHouse:

- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Strictness {#select-join-strictness}

Modifies how matching by “join keys” is performed.

- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.

## Setting {#join-settings}

!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.
@ -693,6 +693,7 @@ auto s = std::string{"Hello"};

## Error Messages {#error-messages}

Error messages are part of a program's user interface, designed to let the user:

* notice error conditions,
* understand their meaning and causes,
* resolve these situations.
@ -700,6 +701,7 @@ auto s = std::string{"Hello"};

The form and content of error messages should serve these goals.

There are two main kinds of errors:

* user or system errors,
* internal program errors.

@ -722,6 +724,7 @@ While processing '(SELECT 2 AS a)'.
The dictionary is configured incorrectly.
```
From this message it is not clear:

- which dictionary?
- what exactly is wrong with its configuration?

@ -735,12 +738,14 @@ The dictionary is configured incorrectly.
Such an error always indicates a bug in the program. The user cannot fix it on their own and should report it to the developers.

There are two main ways to check for such errors:

* An exception with the `LOGICAL_ERROR` code. It can be used for important checks that are performed even in release builds.
* `assert`. Such conditions are not checked in release builds; it can be used for heavy and optional checks.

An example of a message that should have the `LOGICAL_ERROR` code:
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
What signs show that this message is about an internal program error?

* the message mentions internal entities from the code,
* the message says it's a bug,
* no direct user action can fix the error. We expect the user to report it as a bug, and we will fix it in the code.
@ -752,6 +757,7 @@ The dictionary is configured incorrectly.

### How to add a new error message? {#error-messages-add}

When you add an error message:

1. Describe what happened, in user terms, not in code fragments.
2. Add as much context as possible (what it happened to, when, why, etc.).
3. Add typical causes.
@ -67,8 +67,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `min_merge_bytes_to_use_direct_io` — the minimum data volume for a merge operation that is required for using direct (unbuffered) I/O when reading from and writing to the disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data using the `O_DIRECT` flag. If `min_merge_bytes_to_use_direct_io = 0`, direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
- <a name="mergetree_setting-merge_with_ttl_timeout"></a>`merge_with_ttl_timeout` — the minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day).
- `write_final_mark` — enables or disables writing the final index mark at the end of a data part, pointing past the last byte. Default value: 1. Don't turn it off.
- `merge_max_block_size` — the maximum number of rows in a block for merge operations. Default value: 8192.
- `storage_policy` — the data storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
- `min_bytes_for_wide_part`, `min_rows_for_wide_part` — the minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one, both, or none of these settings. See [Data Storage](#mergetree-data-storage) for details.

**Example of Setting the Sections**

@ -123,6 +124,10 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)

Data belonging to different partitions are separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will end up in the same data part.

Data parts can be stored in `Wide` or `Compact` format. In `Wide` format each column is stored in a separate file; in `Compact` format all columns are stored in one file. The `Compact` format can be useful for improving performance when inserting small volumes of data frequently.

The storage format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` engine settings. If the number of bytes or rows in a data part is less than the value of the corresponding setting, the part is stored in `Compact` format. Otherwise it is stored in `Wide` format. If neither setting is set, data parts are stored in `Wide` format.

Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse does not split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for that row (the mark). For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it is in the primary key or not, ClickHouse also stores the same marks. These marks are used to find data directly in the column files.

The granule size is restricted by the `index_granularity` and `index_granularity_bytes` engine settings. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. A granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
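For illustration, a sketch of how these settings are passed (the table and the threshold value are hypothetical):

``` sql
CREATE TABLE example_table  -- hypothetical table
(
    EventDate Date,
    UserID UInt64
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY UserID
SETTINGS min_bytes_for_wide_part = 10485760; -- parts under 10 MiB are stored as Compact
```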
@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` – [JOIN type](../../../engines/table-engines/special/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is performed with.
@ -385,12 +385,37 @@ ClickHouse checks the `min_part_size` and `min_part_size_rat

**Additional Info**

On hosts with a small amount of RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio > 1`.
The default value of `max_server_memory_usage` is calculated as `memory_amount * max_server_memory_usage_to_ram_ratio`.

**See Also**

- [max_memory_usage](../settings/query-complexity.md#settings_max_memory_usage)

## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}

Defines the fraction of available RAM that the ClickHouse server may use. If the server tries to use more, its memory consumption is limited to the calculated value.

Possible values:

- Positive double.
- 0 — The ClickHouse server can use all available RAM.

Default value: `0`.

**Usage**

On hosts with a small amount of RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio` to a value larger than 1.

**Example**

``` xml
<max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
```

**See Also**

- [max_server_memory_usage](#max_server_memory_usage)

## max\_connections {#max-connections}

The maximum number of inbound connections.
@ -520,6 +520,31 @@ ClickHouse uses this parameter when reading data

Default value: 0.

## network_compression_method {#network_compression_method}

Sets the method of data compression that is used for communication between servers and between the server and [clickhouse-client](../../interfaces/cli.md).

Possible values:

- `LZ4` — sets the LZ4 compression method.
- `ZSTD` — sets the ZSTD compression method.

Default value: `LZ4`.

See also:

- [network_zstd_compression_level](#network_zstd_compression_level)

## network_zstd_compression_level {#network_zstd_compression_level}

Adjusts the ZSTD compression level. Used only when [network_compression_method](#network_compression_method) is set to `ZSTD`.

Possible values:

- Positive integer from 1 to 15.

Default value: `1`.
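For illustration, both settings can be changed per session (a sketch; the values are arbitrary):

``` sql
SET network_compression_method = 'ZSTD';
SET network_zstd_compression_level = 3;
```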
## log\_queries {#settings-log-queries}

Setting up query logging.

@ -700,6 +725,17 @@ log_query_threads=1

Default value: 50.

## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}

The wait time in milliseconds for a connection when the connection pool is full.

Possible values:

- Positive integer.
- 0 — Infinite timeout.

Default value: 0.

## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}

The maximum number of connection attempts with each replica for the Distributed table engine.

@ -711,6 +747,21 @@ log_query_threads=1

Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
For more information, see the section “Extreme values”.

## kafka\_max\_wait\_ms {#kafka-max-wait-ms}

The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retry.

Possible values:

- Positive integer.
- 0 — Infinite timeout.

Default value: 5000.

See also:

- [Apache Kafka](https://kafka.apache.org/)

## use\_uncompressed\_cache {#setting-use_uncompressed_cache}

Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
@ -730,6 +781,17 @@ log_query_threads=1

When set to 1, this setting is used in Yandex.Metrica to implement suggestions for segmentation conditions: after the user enters the next character, the old query should be cancelled if it hasn't finished yet.

## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}

The wait time for the query with the same `query_id` to finish, when the [replace_running_query](#replace-running-query) setting is active.

Possible values:

- Positive integer.
- 0 — Throws an exception that does not allow running the new query if the server already executes a query with the same `query_id`.

Default value: 5000.

## stream\_flush\_interval\_ms {#stream-flush-interval-ms}

Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
@ -1216,6 +1278,34 @@ Default value: 0.

Default value: 16.

## insert_distributed_sync {#insert_distributed_sync}

Enables or disables synchronous data insertion into distributed tables (tables with the [Distributed](../../engines/table-engines/special/distributed.md#distributed) engine).

By default, ClickHouse inserts data into a distributed table asynchronously. If `insert_distributed_sync=1`, the data is inserted synchronously, and the `INSERT` query is considered successful once the data is written to all shards (at least one replica for each shard if `internal_replication = true`).

Possible values:

- 0 — Data is inserted asynchronously.
- 1 — Data is inserted synchronously.

Default value: `0`.
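For illustration, a sketch (the `distributed_table` name and the inserted values are hypothetical):

``` sql
SET insert_distributed_sync = 1;
-- the INSERT now returns only after the data reaches every shard
INSERT INTO distributed_table VALUES (1, 'a'); -- hypothetical table
```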
**See Also**

- [Distributed table engine](../../engines/table-engines/special/distributed.md#distributed)
- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed)

## validate\_polygons {#validate_polygons}

Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon) function if the polygon is self-intersecting or self-tangent.

Possible values:

- 0 — Throwing an exception is disabled. `pointInPolygon` accepts invalid polygons and may return incorrect results for them.
- 1 — Throwing an exception is enabled.

Default value: 1.
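For illustration, a sketch with a deliberately self-intersecting polygon (the coordinates are arbitrary):

``` sql
SET validate_polygons = 0;
-- the self-intersecting polygon below no longer throws, but the result may be wrong
SELECT pointInPolygon((5., 2.), [(0., 0.), (10., 10.), (10., 0.), (0., 10.)]);
```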
## always_fetch_merged_part {#always_fetch_merged_part}

Prohibits data merging for tables in the [Replicated*MergeTree](../../engines/table-engines/mergetree-family/replication.md) family.
Some files were not shown because too many files have changed in this diff.