Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into bayesAB

philip.han 2020-07-23 22:21:15 +09:00
commit b71fe9dab9
621 changed files with 11715 additions and 3107 deletions

View File

@ -287,7 +287,7 @@ endif ()
include(cmake/dbms_glob_sources.cmake)
if (OS_LINUX)
if (OS_LINUX OR OS_ANDROID)
include(cmake/linux/default_libs.cmake)
elseif (OS_DARWIN)
include(cmake/darwin/default_libs.cmake)

View File

@ -16,5 +16,4 @@ ClickHouse is an open-source column-oriented database management system that all
## Upcoming Events
* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-july-virtual-meetup-tickets-111199787558) on July 15, 2020.
* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 17, 2020.
* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 31, 2020.

View File

@ -30,7 +30,7 @@ struct StringRef
constexpr StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast<const char *>(data_)), size(size_) {}
StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
constexpr StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {}
constexpr explicit StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {}
constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
constexpr StringRef() = default;
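The practical effect of the added `explicit` is that a `std::string_view` no longer converts to `StringRef` silently at call sites. A minimal sketch of that effect, using a trimmed stand-in struct rather than the full ClickHouse definition:

```cpp
#include <cstddef>
#include <string_view>

struct Ref   // trimmed stand-in for StringRef, for illustration only
{
    const char * data = nullptr;
    size_t size = 0;

    constexpr explicit Ref(const std::string_view & s) : data(s.data()), size(s.size()) {}
    constexpr Ref(const char * data_) : Ref(std::string_view{data_}) {}
    constexpr Ref() = default;
};

void take(Ref) {}

int main()
{
    std::string_view sv = "abc";
    // take(sv);       // no longer compiles: the string_view conversion is explicit
    take(Ref{sv});     // the conversion now has to be spelled out
    take("abc");       // still implicit via the const char * constructor
}
```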

View File

@ -1,6 +1,9 @@
#include <common/getThreadId.h>
#if defined(OS_LINUX)
#if defined(OS_ANDROID)
#include <sys/types.h>
#include <unistd.h>
#elif defined(OS_LINUX)
#include <unistd.h>
#include <syscall.h>
#elif defined(OS_FREEBSD)
@ -16,7 +19,9 @@ uint64_t getThreadId()
{
if (!current_tid)
{
#if defined(OS_LINUX)
#if defined(OS_ANDROID)
current_tid = gettid();
#elif defined(OS_LINUX)
current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
#elif defined(OS_FREEBSD)
current_tid = pthread_getthreadid_np();
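The reason for the extra branch: Bionic (Android's libc) ships a `gettid()` wrapper, while glibc only gained one in version 2.30, so plain Linux builds keep using the raw syscall. A standalone sketch of the same dispatch (illustrative only; the FreeBSD and Darwin branches of the real file are omitted here):

```cpp
#include <cstdint>
#include <cstdio>
#if defined(__ANDROID__)
    #include <unistd.h>          // Bionic provides gettid() directly
#elif defined(__linux__)
    #include <unistd.h>
    #include <sys/syscall.h>     // glibc < 2.30 has no gettid() wrapper, hence the raw syscall
#endif

int main()
{
#if defined(__ANDROID__)
    uint64_t tid = gettid();
#elif defined(__linux__)
    uint64_t tid = syscall(SYS_gettid);
#else
    uint64_t tid = 0;            // other platforms omitted in this sketch
#endif
    std::printf("thread id: %llu\n", static_cast<unsigned long long>(tid));
    return 0;
}
```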

View File

@ -9,7 +9,6 @@
#include <string.h>
#include <signal.h>
#include <cxxabi.h>
#include <execinfo.h>
#include <unistd.h>
#include <typeinfo>

View File

@ -7,7 +7,7 @@
#
# Sets values of:
# OPENLDAP_FOUND - TRUE if found
# OPENLDAP_INCLUDE_DIR - path to the include directory
# OPENLDAP_INCLUDE_DIRS - paths to the include directories
# OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries
# OPENLDAP_LDAP_LIBRARY - paths to the libldap library
# OPENLDAP_LBER_LIBRARY - paths to the liblber library
@ -28,11 +28,11 @@ if(OPENLDAP_USE_REENTRANT_LIBS)
endif()
if(OPENLDAP_ROOT_DIR)
find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
else()
find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h")
find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h")
find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}")
find_library(OPENLDAP_LBER_LIBRARY NAMES "lber")
endif()
@ -44,10 +44,10 @@ set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
OpenLDAP DEFAULT_MSG
OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
OPENLDAP_INCLUDE_DIRS OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
)
mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
mark_as_advanced(OPENLDAP_INCLUDE_DIRS OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
if(OPENLDAP_USE_STATIC_LIBS)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES})

View File

@ -1,4 +1,5 @@
SET(ENABLE_AMQPCPP ${ENABLE_LIBRARIES})
option(ENABLE_AMQPCPP "Enable AMQP-CPP" ${ENABLE_LIBRARIES})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
set (ENABLE_AMQPCPP 0)

View File

@ -1,3 +1,7 @@
option (ENABLE_GTEST_LIBRARY "Enable gtest library" ${ENABLE_LIBRARIES})
if (ENABLE_GTEST_LIBRARY)
option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt")
@ -28,4 +32,6 @@ if((GTEST_INCLUDE_DIRS AND GTEST_BOTH_LIBRARIES) OR GTEST_SRC_DIR)
set(USE_GTEST 1)
endif()
endif()
message (STATUS "Using gtest=${USE_GTEST}: ${GTEST_INCLUDE_DIRS} : ${GTEST_BOTH_LIBRARIES} : ${GTEST_SRC_DIR}")

View File

@ -16,11 +16,16 @@ if (ENABLE_LDAP)
set (OPENLDAP_USE_REENTRANT_LIBS 1)
if (NOT USE_INTERNAL_LDAP_LIBRARY)
if (APPLE AND NOT OPENLDAP_ROOT_DIR)
set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
endif ()
if (OPENLDAP_USE_STATIC_LIBS)
message (WARNING "Unable to use external static OpenLDAP libraries, falling back to the bundled version.")
set (USE_INTERNAL_LDAP_LIBRARY 1)
else ()
if (APPLE AND NOT OPENLDAP_ROOT_DIR)
set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
endif ()
find_package (OpenLDAP)
find_package (OpenLDAP)
endif ()
endif ()
if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
@ -54,7 +59,10 @@ if (ENABLE_LDAP)
else ()
set (USE_INTERNAL_LDAP_LIBRARY 1)
set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap")
set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include")
set (OPENLDAP_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/openldap-cmake/${_system_name}_${_system_processor}/include"
"${ClickHouse_SOURCE_DIR}/contrib/openldap/include"
)
# Below, 'ldap'/'ldap_r' and 'lber' will be resolved to
# the targets defined in contrib/openldap-cmake/CMakeLists.txt
if (OPENLDAP_USE_REENTRANT_LIBS)
@ -73,4 +81,4 @@ if (ENABLE_LDAP)
endif ()
endif ()
message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}")
message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIRS} : ${OPENLDAP_LIBRARIES}")

View File

@ -1,3 +1,7 @@
option(ENABLE_GSASL_LIBRARY "Enable gsasl library" ${ENABLE_LIBRARIES})
if (ENABLE_GSASL_LIBRARY)
option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")
@ -24,4 +28,6 @@ if(LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR)
set (USE_LIBGSASL 1)
endif()
endif()
message (STATUS "Using libgsasl=${USE_LIBGSASL}: ${LIBGSASL_INCLUDE_DIR} : ${LIBGSASL_LIBRARY}")

View File

@ -1,3 +1,7 @@
option (ENABLE_MSGPACK "Enable msgpack library" ${ENABLE_LIBRARIES})
if (ENABLE_MSGPACK)
option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED})
if (USE_INTERNAL_MSGPACK_LIBRARY)
@ -14,4 +18,10 @@ else()
find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS})
endif()
message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}")
if (MSGPACK_INCLUDE_DIR)
set(USE_MSGPACK 1)
endif()
endif()
message(STATUS "Using msgpack=${USE_MSGPACK}: ${MSGPACK_INCLUDE_DIR}")

View File

@ -1,17 +1,8 @@
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include/simdjson/jsonparser.h")
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include/simdjson.h")
message (WARNING "submodule contrib/simdjson is missing. to fix try run: \n git submodule update --init --recursive")
return()
endif ()
if (NOT HAVE_SSE42)
message (WARNING "submodule contrib/simdjson requires support of SSE4.2 instructions")
return()
elseif (NOT HAVE_PCLMULQDQ)
message (WARNING "submodule contrib/simdjson requires support of PCLMULQDQ instructions")
return()
endif ()
option (USE_SIMDJSON "Use simdjson" ON)
set (SIMDJSON_LIBRARY "simdjson")
message(STATUS "Using simdjson=${USE_SIMDJSON}: ${SIMDJSON_LIBRARY}")
message(STATUS "Using simdjson=${USE_SIMDJSON}")

View File

@ -11,7 +11,12 @@ else ()
set (BUILTINS_LIBRARY "-lgcc")
endif ()
if (OS_ANDROID)
# pthread and rt are included in libc
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
else ()
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
endif ()
message(STATUS "Default libraries: ${DEFAULT_LIBS}")
@ -35,7 +40,11 @@ add_library(global-libs INTERFACE)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
add_subdirectory(base/glibc-compatibility)
if (NOT OS_ANDROID)
# Our compatibility layer doesn't build under Android, many errors in musl.
add_subdirectory(base/glibc-compatibility)
endif ()
include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)

View File

@ -1,6 +1,11 @@
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
set (OS_LINUX 1)
add_definitions(-D OS_LINUX)
elseif (CMAKE_SYSTEM_NAME MATCHES "Android")
# This is a toy configuration and not in CI, so expect it to be broken.
# Use cmake flags such as: -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28
set (OS_ANDROID 1)
add_definitions(-D OS_ANDROID)
elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
set (OS_FREEBSD 1)
add_definitions(-D OS_FREEBSD)
@ -17,7 +22,7 @@ if (CMAKE_CROSSCOMPILING)
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
set (ENABLE_ICU OFF CACHE INTERNAL "")
set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
elseif (OS_LINUX)
elseif (OS_LINUX OR OS_ANDROID)
if (ARCH_AARCH64)
# FIXME: broken dependencies
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")

View File

@ -22,7 +22,7 @@ elseif (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0)
# char8_t is available staring (upstream vanilla) Clang 7, but prior to Clang 8,
# char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8,
# it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
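For reference, a minimal sketch of the kind of code this flag unlocks: under C++20, `u8` literals have type `char8_t`, which older AppleClang only accepts with the explicit `-fchar8_t` set above.

```cpp
constexpr char8_t greeting[] = u8"hi";   // char8_t requires C++20 (or -fchar8_t on older Clang)
static_assert(sizeof(greeting) == 3, "two code units plus the terminating zero");
```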

View File

@ -102,7 +102,7 @@ if (USE_INTERNAL_SSL_LIBRARY)
add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
endif ()
if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY)
if (USE_INTERNAL_LDAP_LIBRARY)
add_subdirectory (openldap-cmake)
endif ()

contrib/simdjson vendored

@ -1 +1 @@
Subproject commit 560f0742cc0895d00d78359dbdeb82064a24adb8
Subproject commit 1e4aa116e5a39e4ba23b9a93e6c7f048c5105b20

View File

@ -1,14 +1,6 @@
set(SIMDJSON_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include")
set(SIMDJSON_SRC_DIR "${SIMDJSON_INCLUDE_DIR}/../src")
set(SIMDJSON_SRC
${SIMDJSON_SRC_DIR}/document.cpp
${SIMDJSON_SRC_DIR}/error.cpp
${SIMDJSON_SRC_DIR}/implementation.cpp
${SIMDJSON_SRC_DIR}/jsonioutil.cpp
${SIMDJSON_SRC_DIR}/jsonminifier.cpp
${SIMDJSON_SRC_DIR}/stage1_find_marks.cpp
${SIMDJSON_SRC_DIR}/stage2_build_tape.cpp
)
set(SIMDJSON_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/src")
set(SIMDJSON_SRC ${SIMDJSON_SRC_DIR}/simdjson.cpp)
add_library(${SIMDJSON_LIBRARY} ${SIMDJSON_SRC})
target_include_directories(${SIMDJSON_LIBRARY} SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}")
add_library(simdjson ${SIMDJSON_SRC})
target_include_directories(simdjson SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}")

View File

@ -88,6 +88,10 @@
"name": "yandex/clickhouse-testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
"name": "yandex/clickhouse-fasttest",
"dependent": []
},
"docker/test/integration/s3_proxy": {
"name": "yandex/clickhouse-s3-proxy",
"dependent": []
@ -95,5 +99,9 @@
"docker/test/integration/resolver": {
"name": "yandex/clickhouse-python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
"name": "yandex/clickhouse-integration-helper",
"dependent": []
}
}

View File

@ -0,0 +1,65 @@
# docker build -t yandex/clickhouse-fasttest .
FROM ubuntu:19.10
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''
RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg
RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list
RUN apt-get --allow-unauthenticated update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get --allow-unauthenticated install --yes --no-install-recommends \
bash \
fakeroot \
ccache \
software-properties-common \
apt-transport-https \
ca-certificates \
wget \
bash \
fakeroot \
cmake \
ccache \
llvm-10 \
clang-10 \
lld-10 \
clang-tidy-10 \
ninja-build \
gperf \
git \
tzdata \
gperf \
rename \
build-essential \
expect \
python \
python-lxml \
python-termcolor \
python-requests \
unixodbc \
qemu-user-static \
sudo \
moreutils \
curl \
brotli
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget --quiet -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
&& cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
&& odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
&& odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
&& rm -rf /tmp/clickhouse-odbc-tmp
# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]

docker/test/fasttest/run.sh Executable file
View File

@ -0,0 +1,97 @@
#!/bin/bash
set -x -e
ls -la
git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt
cd ClickHouse
CLICKHOUSE_DIR=`pwd`
if [ "$PULL_REQUEST_NUMBER" != "0" ]; then
if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
git checkout FETCH_HEAD
echo 'Cloned merge head'
else
git fetch
git checkout $COMMIT_SHA
echo 'Checked out to commit'
fi
else
if [ "$COMMIT_SHA" != "" ]; then
git checkout $COMMIT_SHA
fi
fi
SUBMODULES_TO_UPDATE="contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11"
git submodule update --init --recursive $SUBMODULES_TO_UPDATE | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt
export CMAKE_LIBS_CONFIG="-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1"
export CCACHE_DIR=/ccache
export CCACHE_BASEDIR=/ClickHouse
export CCACHE_NOHASHDIR=true
export CCACHE_COMPILERCHECK=content
export CCACHE_MAXSIZE=15G
ccache --show-stats ||:
ccache --zero-stats ||:
mkdir build
cd build
CLICKHOUSE_BUILD_DIR=`pwd`
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 $CMAKE_LIBS_CONFIG | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt
ccache --show-stats ||:
mkdir -p /etc/clickhouse-server
mkdir -p /etc/clickhouse-client
mkdir -p /etc/clickhouse-server/config.d
mkdir -p /etc/clickhouse-server/users.d
mkdir -p /var/log/clickhouse-server
cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/
mkdir -p /etc/clickhouse-server/dict_examples
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
until clickhouse-client --query "SELECT 1"
do
sleep 0.1
done
TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper"
clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
mv /var/log/clickhouse-server/* /test_output

View File

@ -37,6 +37,8 @@ function download
wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-10_debug_none_bundled_unsplitted_disable_False_binary/clickhouse"
chmod +x clickhouse
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client
}
function configure
@ -45,7 +47,8 @@ function configure
mkdir db ||:
cp -av "$repo_dir"/programs/server/config* db
cp -av "$repo_dir"/programs/server/user* db
cp -av "$repo_dir"/tests/config db/config.d
# TODO figure out which ones are needed
cp -av "$repo_dir"/tests/config/listen.xml db/config.d
cp -av "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
}
@ -54,31 +57,55 @@ function watchdog
sleep 3600
echo "Fuzzing run has timed out"
./clickhouse client --query "select elapsed, query from system.processes" ||:
killall -9 clickhouse clickhouse-server clickhouse-client ||:
killall clickhouse-client ||:
for x in {1..10}
do
if ! pgrep -f clickhouse-client
then
break
fi
sleep 1
done
killall -9 clickhouse-client ||:
}
function fuzz
{
./clickhouse server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
server_pid=$!
kill -0 $server_pid
while ! ./clickhouse client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
./clickhouse client --query "select 1"
while ! ./clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
./clickhouse-client --query "select 1"
kill -0 $server_pid
echo Server started
fuzzer_exit_code=0
./clickhouse client --query-fuzzer-runs=1000 \
./clickhouse-client --query-fuzzer-runs=1000 \
< <(for f in $(ls ch/tests/queries/0_stateless/*.sql | sort -R); do cat "$f"; echo ';'; done) \
> >(tail -100000 > fuzzer.log) \
2>&1 \
|| fuzzer_exit_code=$?
echo "Fuzzer exit code is $fuzzer_exit_code"
./clickhouse client --query "select elapsed, query from system.processes" ||:
kill -9 $server_pid ||:
return $fuzzer_exit_code
./clickhouse-client --query "select elapsed, query from system.processes" ||:
killall clickhouse-server ||:
for x in {1..10}
do
if ! pgrep -f clickhouse-server
then
break
fi
sleep 1
done
killall -9 clickhouse-server ||:
if [ "$fuzzer_exit_code" == "143" ]
then
# Killed by watchdog, meaning no errors.
fuzzer_exit_code=0
fi
}
case "$stage" in
@ -106,11 +133,19 @@ case "$stage" in
time configure
;&
"fuzz")
# Start a watchdog that should kill the fuzzer on timeout.
# The shell won't kill the child sleep when we kill it, so we have to put it
# into a separate process group so that we can kill them all.
set -m
watchdog &
watchdog_pid=$!
set +m
# Check that the watchdog has started
kill -0 $watchdog_pid
fuzzer_exit_code=0
time fuzz || fuzzer_exit_code=$?
kill $watchdog_pid ||:
kill -- -$watchdog_pid ||:
# Debug
date
@ -118,6 +153,19 @@ case "$stage" in
jobs
pstree -aspgT
# Make files with status and description we'll show for this check on Github
if [ "$fuzzer_exit_code" == 0 ]
then
echo "OK" > description.txt
echo "success" > status.txt
else
echo "failure" > status.txt
if ! grep -a "received signal \|Logical error" server.log > description.txt
then
echo "Fuzzer exit code $fuzzer_exit_code. See the logs" > description.txt
fi
fi
exit $fuzzer_exit_code
;&
esac

View File

@ -1,3 +1,4 @@
# docker build -t yandex/clickhouse-integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine

View File

@ -17,6 +17,7 @@ RUN apt-get update \
libc6-dbg \
moreutils \
ncdu \
numactl \
p7zip-full \
parallel \
psmisc \

View File

@ -55,18 +55,21 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
# Retain any pre-existing config and allow ClickHouse to load those if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
service zookeeper start

View File

@ -17,7 +17,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
@ -33,6 +32,10 @@ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
# Retain any pre-existing config and allow ClickHouse to load it if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
fi

View File

@ -46,27 +46,30 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
# Retain any pre-existing config and allow ClickHouse to load it if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
service zookeeper start
sleep 5

View File

@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.39 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

View File

@ -1,25 +0,0 @@
{
"checkYo": false,
"excludeFiles": [],
"fileExtensions": [],
"format": "auto",
"ignoreTags": [
"code",
"kbd",
"object",
"samp",
"script",
"style",
"var"
],
"maxRequests": 2,
"lang": "en,ru",
"report": ["console"],
"dictionary": [
"(C|c)lick(H|h)ouse",
"CatBoost",
"(Ш|ш)ард(ы|ов|а|у|е|ам|ирование|ированы|ах)?",
"логир(ование|уются|ования)?",
"конфиг(а|е|ом|у)"
]
}

View File

@ -0,0 +1,24 @@
# Statement name (for example, SHOW USER)
Brief description of what the statement does.
Syntax:
```sql
Syntax of the statement.
```
## Other necessary sections of the description (Optional)
Examples of descriptions with a complicated structure:
- https://clickhouse.tech/docs/en/sql-reference/statements/grant/
- https://clickhouse.tech/docs/en/sql-reference/statements/revoke/
- https://clickhouse.tech/docs/en/sql-reference/statements/select/join/
## See Also (Optional)
Links to related topics as a list.
- [link](#)

View File

@ -1,6 +1,6 @@
---
toc_priority: 71
toc_title: Source Code
toc_title: Source Code Browser
---
# Browse ClickHouse Source Code {#browse-clickhouse-source-code}

View File

@ -1,6 +1,6 @@
---
toc_priority: 67
toc_title: How to Build ClickHouse on Linux for AARCH64 (ARM64)
toc_title: Build on Linux for AARCH64 (ARM64)
---
# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.
# Install Clang-8 {#install-clang-8}
## Install Clang-8 {#install-clang-8}
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, in Ubuntu Bionic you can use the following commands:
@ -20,7 +20,7 @@ sudo apt-get update
sudo apt-get install clang-8
```
# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
``` bash
cd ClickHouse
@ -29,7 +29,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
```
# Build ClickHouse {#build-clickhouse}
## Build ClickHouse {#build-clickhouse}
``` bash
cd ClickHouse

View File

@ -1,6 +1,6 @@
---
toc_priority: 66
toc_title: How to Build ClickHouse on Linux for Mac OS X
toc_title: Build on Linux for Mac OS X
---
# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}
@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.
# Install Clang-8 {#install-clang-8}
## Install Clang-8 {#install-clang-8}
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example the commands for Bionic are like:
@ -19,7 +19,7 @@ sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8
sudo apt-get install clang-8
```
# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
Let's remember the path where we install `cctools` as ${CCTOOLS}
@ -47,7 +47,7 @@ mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```
# Build ClickHouse {#build-clickhouse}
## Build ClickHouse {#build-clickhouse}
``` bash
cd ClickHouse

View File

@ -1,6 +1,6 @@
---
toc_priority: 65
toc_title: How to Build ClickHouse on Mac OS X
toc_title: Build on Mac OS X
---
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
@ -45,14 +45,12 @@ $ cd ..
## Caveats {#caveats}
If you intend to run clickhouse-server, make sure to increase the system's maxfiles variable.
If you intend to run `clickhouse-server`, make sure to increase the system's maxfiles variable.
!!! info "Note"
You'll need to use sudo.
To do so, create the following file:
/Library/LaunchDaemons/limit.maxfiles.plist:
To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:
``` xml
<?xml version="1.0" encoding="UTF-8"?>

View File

@ -1,11 +1,9 @@
---
toc_priority: 64
toc_title: How to Build ClickHouse on Linux
toc_title: Build on Linux
---
# How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development}
The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
# How to Build ClickHouse on Linux {#how-to-build-clickhouse-for-development}
Supported platforms:
@ -13,7 +11,11 @@ Supported platforms:
- AArch64
- Power9 (experimental)
## Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
## Normal Build for Development on Ubuntu
The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
### Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
``` bash
$ sudo apt-get install git cmake python ninja-build
@ -21,18 +23,18 @@ $ sudo apt-get install git cmake python ninja-build
Or cmake3 instead of cmake on older systems.
## Install GCC 9 {#install-gcc-9}
### Install GCC 9 {#install-gcc-9}
There are several ways to do this.
### Install from Repository {#install-from-repository}
#### Install from Repository {#install-from-repository}
On Ubuntu 19.10 or newer:
$ sudo apt-get update
$ sudo apt-get install gcc-9 g++-9
### Install from a PPA Package {#install-from-a-ppa-package}
#### Install from a PPA Package {#install-from-a-ppa-package}
On older Ubuntu:
@ -43,18 +45,18 @@ $ sudo apt-get update
$ sudo apt-get install gcc-9 g++-9
```
### Install from Sources {#install-from-sources}
#### Install from Sources {#install-from-sources}
See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
## Use GCC 9 for Builds {#use-gcc-9-for-builds}
### Use GCC 9 for Builds {#use-gcc-9-for-builds}
``` bash
$ export CC=gcc-9
$ export CXX=g++-9
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
@ -66,7 +68,7 @@ or
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```
## Build ClickHouse {#build-clickhouse}
### Build ClickHouse {#build-clickhouse}
``` bash
$ cd ClickHouse
@ -79,7 +81,7 @@ $ ninja
To create an executable, run `ninja clickhouse`.
This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.
# How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
## How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
The build requires the following components:
@ -93,32 +95,58 @@ The build requires the following components:
If all the components are installed, you may build in the same way as the steps above.
Example for Ubuntu Eoan:
sudo apt update
sudo apt install git cmake ninja-build g++ python
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
``` bash
sudo apt update
sudo apt install git cmake ninja-build g++ python
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
```
Example for OpenSUSE Tumbleweed:
sudo zypper install git cmake ninja gcc-c++ python lld
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
``` bash
sudo zypper install git cmake ninja gcc-c++ python lld
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
```
Example for Fedora Rawhide:
``` bash
sudo yum update
yum --nogpg install git cmake make gcc-c++ python2
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
make -j $(nproc)
```
sudo yum update
yum --nogpg install git cmake make gcc-c++ python2
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
make -j $(nproc)
# You Don't Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
## How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
### Install Git and Pbuilder {#install-git-and-pbuilder}
``` bash
$ sudo apt-get update
$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```
### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
``` bash
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
```
### Run Release Script {#run-release-script}
``` bash
$ ./release
```
## You Don't Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.
@ -126,26 +154,4 @@ They are built for stable, prestable and testing releases as long as for every c
To find the freshest build from `master`, go to [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green checkmark or red cross near the commit, and click the “Details” link right after “ClickHouse Build Check”.
# How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
## Install Git and Pbuilder {#install-git-and-pbuilder}
``` bash
$ sudo apt-get update
$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
``` bash
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
```
## Run Release Script {#run-release-script}
``` bash
$ ./release
```
[Original article](https://clickhouse.tech/docs/en/development/build/) <!--hide-->

View File

@ -35,6 +35,7 @@ toc_title: Third-Party Libraries Used
| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
| sentry-native | [MIT License](https://github.com/getsentry/sentry-native/blob/master/LICENSE) |
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |

View File

@ -1,6 +1,6 @@
---
toc_priority: 68
toc_title: How to Write C++ Code
toc_title: C++ Guide
---
# How to Write C++ Code {#how-to-write-c-code}

View File

@ -1,6 +1,6 @@
---
toc_priority: 69
toc_title: How to Run ClickHouse Tests
toc_title: Testing
---
# ClickHouse Testing {#clickhouse-testing}
@ -25,12 +25,7 @@ Tests should use (create, drop, etc) only tables in `test` database that is assu
If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`.
Some tests are marked with `zookeeper`, `shard` or `long` in their names.
`zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that
requires server to listen `127.0.0.*`; `distributed` or `global` have the same
meaning. `long` is for tests that run slightly longer that one second. You can
disable these groups of tests using `--no-zookeeper`, `--no-shard` and
`--no-long` options, respectively.
Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that require the server to listen on `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer than one second. You can disable these groups of tests using the `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively.
## Known Bugs {#known-bugs}
@ -153,11 +148,11 @@ Motivation:
Normally we release and run all tests on a single variant of ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:
- build on FreeBSD;
- build on Debian with libraries from system packages;
- build with shared linking of libraries;
- build on AArch64 platform;
- build on PowerPc platform.
- build on FreeBSD
- build on Debian with libraries from system packages
- build with shared linking of libraries
- build on AArch64 platform
- build on PowerPc platform
For example, build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.
@ -177,22 +172,22 @@ For production builds, gcc is used (it still generates slightly more efficient c
## Sanitizers {#sanitizers}
**Address sanitizer**.
### Address sanitizer
We run functional and integration tests under ASan on a per-commit basis.
**Valgrind (Memcheck)**.
### Valgrind (Memcheck)
We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).
**Undefined behaviour sanitizer.**
### Undefined behaviour sanitizer
We run functional and integration tests under UBSan on a per-commit basis.
**Thread sanitizer**.
### Thread sanitizer
We run functional tests under TSan on a per-commit basis. We still don't run integration tests under TSan on a per-commit basis.
**Memory sanitizer**.
### Memory sanitizer
Currently we still don't use MSan.
**Debug allocator.**
### Debug allocator
Debug version of `jemalloc` is used for debug build.
## Fuzzing {#fuzzing}
@ -227,7 +222,7 @@ If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of t
## Code Style {#code-style}
Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
Code style rules are described [here](style.md).
To check for some common style violations, you can use `utils/check-style` script.

View File

@ -96,6 +96,7 @@ For a description of parameters, see the [CREATE query description](../../../sql
- `write_final_mark` — Enables or disables writing the final index mark at the end of the data part (after the last byte). Default value: 1. Don't turn it off.
- `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192.
- `storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
- `min_bytes_for_wide_part`, `min_rows_for_wide_part` — Minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one, both or none of these settings. See [Data Storage](#mergetree-data-storage).
**Example of Sections Setting**
@ -149,6 +150,10 @@ When data is inserted in a table, separate data parts are created and each of th
Data belonging to different partitions are separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.
Data parts can be stored in `Wide` or `Compact` format. In the `Wide` format, each column is stored in a separate file in a filesystem; in the `Compact` format, all columns are stored in one file. The `Compact` format can be used to increase the performance of small and frequent inserts.
The data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the table engine. If the number of bytes or rows in a data part is less than the corresponding setting's value, the part is stored in `Compact` format. Otherwise it is stored in `Wide` format. If none of these settings is set, data parts are stored in `Wide` format.
Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn't split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it's in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.
The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
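The selection rule described above fits in a few lines; the following is an illustrative sketch of the documented behaviour, not ClickHouse's actual implementation (function and parameter names are assumed):

```cpp
#include <cstdint>

enum class PartFormat { Wide, Compact };

// A part is Compact when it is below either threshold; an unset setting
// (modeled as 0 here) never forces the Compact format, so with both
// settings unset every part comes out Wide, as the text above states.
PartFormat choosePartFormat(uint64_t bytes, uint64_t rows,
                            uint64_t min_bytes_for_wide_part,
                            uint64_t min_rows_for_wide_part)
{
    if (bytes < min_bytes_for_wide_part || rows < min_rows_for_wide_part)
        return PartFormat::Compact;
    return PartFormat::Wide;
}
```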

View File

@ -22,7 +22,7 @@ The Distributed engine accepts parameters:
See also:
- `insert_distributed_sync` setting
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
Example:

View File

@ -24,7 +24,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.

View File

@ -16,7 +16,7 @@ One of the following batches of those t-shirts was supposed to be given away on
So, what does it mean? Here are some ways to translate *“не тормозит”*:
- If you translate it literally, it'd be something like *“ClickHouse doesn't press the brake pedal”*.
- If you'd want to express it as close to how it sounds to a Russian person with IT background, it'd be something like *“If you larger system lags, it's not because it uses ClickHouse”*.
- If you'd want to express it as close to how it sounds to a Russian person with IT background, it'd be something like *“If your larger system lags, it's not because it uses ClickHouse”*.
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn't lag”* or just *“ClickHouse is fast”*.
If you haven't seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:

View File

@ -30,7 +30,7 @@ See [File](../../engines/table-engines/special/file.md) table engine.
## Using Command-Line Redirection {#using-command-line-redirection}
``` sql
``` bash
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

View File

@ -1,13 +1,14 @@
---
toc_folder_title: Example Datasets
toc_priority: 12
toc_priority: 15
toc_title: Introduction
---
# Example Datasets {#example-datasets}
This section describes how to obtain example datasets and import them into ClickHouse.
For some datasets example queries are also available.
This section describes how to obtain example datasets and import them into ClickHouse. For some datasets example queries are also available.
The list of documented datasets:
- [Anonymized Yandex.Metrica Dataset](../../getting-started/example-datasets/metrica.md)
- [Star Schema Benchmark](../../getting-started/example-datasets/star-schema.md)

View File

@ -37,6 +37,7 @@ The queries are executed as a read-only user. It implies some limitations:
- INSERT queries are not allowed
The following settings are also enforced:
- [max\_result\_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes)
- [max\_result\_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows)
- [result\_overflow\_mode=break](../operations/settings/query_complexity/#result-overflow-mode)

View File

@ -20,6 +20,7 @@ toc_title: Adopters
| <a href="https://www.bloomberg.com/" class="favicon">Bloomberg</a> | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
| <a href="https://bloxy.info" class="favicon">Bloxy</a> | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |
| <a href="https://carto.com/" class="favicon">CARTO</a> | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
| <a href="http://public.web.cern.ch/public/" class="favicon">CERN</a> | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
| <a href="http://cisco.com/" class="favicon">Cisco</a> | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
@ -44,6 +45,7 @@ toc_title: Adopters
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
| <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
| <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
| <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
| <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |

View File

@ -8,73 +8,54 @@ toc_title: Testing Hardware
With this instruction you can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master
2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link.
3. Copy the link to “clickhouse” binary for amd64 or aarch64.
3. Copy the link to `clickhouse` binary for amd64 or aarch64.
4. ssh to the server and download it with wget:
<!-- -->
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
# For aarch64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
# Then do:
chmod a+x clickhouse
1. Download configs:
<!-- -->
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
1. Download benchmark files:
<!-- -->
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
1. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
<!-- -->
wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .
1. Run the server:
<!-- -->
./clickhouse server
1. Check the data: ssh to the server in another terminal
<!-- -->
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000
1. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `-max_memory_usage 100000000000` parameter.
<!-- -->
mcedit benchmark-new.sh
1. Run the benchmark:
<!-- -->
./benchmark-new.sh hits_100m_obfuscated
1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
```bash
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
# For aarch64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
# Then do:
chmod a+x clickhouse
```
5. Download configs:
```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
```
6. Download benchmark files:
```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
```
7. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
```bash
wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .
```
8. Run the server:
```bash
./clickhouse server
```
9. Check the data: ssh to the server in another terminal
```bash
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000
```
10. Edit benchmark-new.sh: change `clickhouse-client` to `./clickhouse client` and add the `--max_memory_usage 100000000000` parameter.
```bash
mcedit benchmark-new.sh
```
11. Run the benchmark:
```bash
./benchmark-new.sh hits_100m_obfuscated
```
12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
All the results are published here: https://clickhouse.tech/benchmark/hardware/

View File

@ -397,7 +397,6 @@ The cache is shared for the server and memory is allocated as needed. The cache
``` xml
<mark_cache_size>5368709120</mark_cache_size>
```
## max\_server\_memory\_usage {#max_server_memory_usage}
Limits total RAM usage by the ClickHouse server. You can specify it only for the default profile.
@ -411,11 +410,37 @@ Default value: `0`.
**Additional Info**
On hosts with low RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio > 1`.
The default `max_server_memory_usage` value is calculated as `memory_amount * max_server_memory_usage_to_ram_ratio`.
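As a quick sanity check, the server-wide tracked memory can be compared against this limit; a minimal sketch, assuming your version exposes the `MemoryTracking` metric:
``` sql
-- Current amount of memory tracked by the server, in bytes.
SELECT metric, formatReadableSize(value) AS current_usage
FROM system.metrics
WHERE metric = 'MemoryTracking';
```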
**See also**
- [max\_memory\_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage)
- [max_server_memory_usage_to_ram_ratio](#max_server_memory_usage_to_ram_ratio)
## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}
Defines the fraction of the total physical RAM that is available to the ClickHouse server. If the server tries to utilize more, the memory is cut down to the appropriate amount.
Possible values:
- Positive double.
- 0 — The ClickHouse server can use all available RAM.
Default value: `0`.
**Usage**
On hosts with low RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio` larger than 1.
**Example**
``` xml
<max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
```
**See Also**
- [max_server_memory_usage](#max_server_memory_usage)
## max\_concurrent\_queries {#max-concurrent-queries}

View File

@ -471,7 +471,7 @@ Default value: 0.
See also:
- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)
- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings)
## temporary\_files\_codec {#temporary_files_codec}
@ -585,6 +585,31 @@ Possible values:
Default value: 0.
## network_compression_method {#network_compression_method}
Sets the method of data compression that is used for communication between servers and between the server and [clickhouse-client](../../interfaces/cli.md).
Possible values:
- `LZ4` — sets LZ4 compression method.
- `ZSTD` — sets ZSTD compression method.
Default value: `LZ4`.
**See Also**
- [network_zstd_compression_level](#network_zstd_compression_level)
## network_zstd_compression_level {#network_zstd_compression_level}
Adjusts the level of ZSTD compression. Used only when [network_compression_method](#network_compression_method) is set to `ZSTD`.
Possible values:
- Positive integer from 1 to 15.
Default value: `1`.
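As an illustration, both settings can be changed for the current session with `SET` (a sketch; whether the change applies depends on your user's settings constraints):
``` sql
-- Compress network traffic with ZSTD at a custom level for this session.
SET network_compression_method = 'ZSTD';
SET network_zstd_compression_level = 3;
```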
## log\_queries {#settings-log-queries}
Setting up query logging.
@ -783,6 +808,17 @@ If unsuccessful, several attempts are made to connect to various replicas.
Default value: 50.
## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}
The wait time in milliseconds for a connection when the connection pool is full.
Possible values:
- Positive integer.
- 0 — Infinite timeout.
Default value: 0.
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
The maximum number of connection attempts with each replica for the Distributed table engine.
@ -794,6 +830,21 @@ Default value: 3.
Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
For more information, see the section “Extreme values”.
## kafka\_max\_wait\_ms {#kafka-max-wait-ms}
The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retry.
Possible values:
- Positive integer.
- 0 — Infinite timeout.
Default value: 5000.
See also:
- [Apache Kafka](https://kafka.apache.org/)
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
@ -812,6 +863,17 @@ If a query from the same user with the same query\_id already exists at th
Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasnt finished yet, it should be cancelled.
## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}
The wait time for a running query with the same `query_id` to finish, when the [replace_running_query](#replace-running-query) setting is active.
Possible values:
- Positive integer.
- 0 — Throwing an exception that does not allow running a new query if the server is already executing a query with the same `query_id`.
Default value: 5000.
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
@ -1397,6 +1459,23 @@ Possible values:
Default value: 16.
## insert_distributed_sync {#insert_distributed_sync}
Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table.
By default, when inserting data into a `Distributed` table, the ClickHouse server sends data to cluster nodes in asynchronous mode. When `insert_distributed_sync=1`, the data is processed synchronously, and the `INSERT` operation succeeds only after all the data is saved on all shards (at least one replica for each shard if `internal_replication` is true).
Possible values:
- 0 — Data is inserted in asynchronous mode.
- 1 — Data is inserted in synchronous mode.
Default value: `0`.
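For example, a synchronous insert may look as follows (a sketch; `distributed_table` is a hypothetical `Distributed` table):
``` sql
-- The INSERT returns only after the data is stored on all shards.
SET insert_distributed_sync = 1;
INSERT INTO distributed_table VALUES (1, 'a'), (2, 'b');
```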
**See Also**
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md#distributed)
- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed)
## background\_buffer\_flush\_schedule\_pool\_size {#background_buffer_flush_schedule_pool_size}
Sets the number of threads performing background flush in [Buffer](../../engines/table-engines/special/buffer.md)-engine tables. This setting is applied at ClickHouse server start and cant be changed in a user session.
@ -1454,6 +1533,17 @@ Possible values:
Default value: 16.
## validate\_polygons {#validate_polygons}
Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.
Possible values:
- 0 — Throwing an exception is disabled. `pointInPolygon` accepts invalid polygons and returns possibly incorrect results for them.
- 1 — Throwing an exception is enabled.
Default value: 1.
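For instance, the polygon below is self-intersecting (its edges cross at `(2, 2)`), so `pointInPolygon` throws under the default setting and returns a possibly incorrect result when validation is disabled; a sketch:
``` sql
SET validate_polygons = 0;
SELECT pointInPolygon((1., 1.), [(0., 0.), (4., 4.), (4., 0.), (0., 4.)]);
```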
## transform\_null\_in {#transform_null_in}
Enables equality of [NULL](../../sql-reference/syntax.md#null-literal) values for [IN](../../sql-reference/operators/in.md) operator.

View File

@ -1,3 +1,36 @@
## system.asynchronous\_metric\_log {#system-tables-async-log}
Contains the historical values for `system.asynchronous_log` (see [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))
Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. This feature is enabled by default.
Columns:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
**Example**
``` sql
SELECT * FROM system.asynchronous_metric_log LIMIT 10
```
``` text
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬────value─┐
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty │ 4214 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained │ 17657856 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped │ 71471104 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident │ 61538304 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata │ 6199264 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated │ 38074336 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch │ 2 │
└────────────┴─────────────────────┴──────────────────────────────────────────┴──────────┘
```
**See Also**
- [system.asynchronous\_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background.
- [system.metric_log](../system-tables/metric_log.md) — Contains a history of metric values from the tables `system.metrics` and `system.events`, periodically flushed to disk.

View File

@ -0,0 +1,10 @@
# system.current_roles {#system_tables-current_roles}
Contains the active roles of the current user. `SET ROLE` changes the contents of this table.
Columns:
- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
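A minimal query to inspect the table (the output depends on the roles assigned to the current user):
``` sql
SELECT * FROM system.current_roles;
```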
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->

View File

@ -0,0 +1,11 @@
# system.enabled_roles {#system_tables-enabled_roles}
Contains all active roles at the moment, including the current role of the current user and the roles granted to the current role.
Columns:
- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a role with `ADMIN OPTION` privilege.
- `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role.
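A minimal query showing which of the enabled roles are current and/or default:
``` sql
SELECT role_name, is_current, is_default FROM system.enabled_roles;
```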
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) <!--hide-->

View File

@ -0,0 +1,39 @@
# system.licenses {#system-tables_system.licenses}
Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources.
Columns:
- `library_name` ([String](../../sql-reference/data-types/string.md)) — Name of the library that the license is connected with.
- `license_type` ([String](../../sql-reference/data-types/string.md)) — License type — e.g. Apache, MIT.
- `license_path` ([String](../../sql-reference/data-types/string.md)) — Path to the file with the license text.
- `license_text` ([String](../../sql-reference/data-types/string.md)) — License text.
**Example**
``` sql
SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
```
``` text
┌─library_name───────┬─license_type─┬─license_path────────────────────────┐
│ FastMemcpy │ MIT │ /contrib/FastMemcpy/LICENSE │
│ arrow │ Apache │ /contrib/arrow/LICENSE.txt │
│ avro │ Apache │ /contrib/avro/LICENSE.txt │
│ aws-c-common │ Apache │ /contrib/aws-c-common/LICENSE │
│ aws-c-event-stream │ Apache │ /contrib/aws-c-event-stream/LICENSE │
│ aws-checksums │ Apache │ /contrib/aws-checksums/LICENSE │
│ aws │ Apache │ /contrib/aws/LICENSE.txt │
│ base64 │ BSD 2-clause │ /contrib/base64/LICENSE │
│ boost │ Boost │ /contrib/boost/LICENSE_1_0.txt │
│ brotli │ MIT │ /contrib/brotli/LICENSE │
│ capnproto │ MIT │ /contrib/capnproto/LICENSE │
│ cassandra │ Apache │ /contrib/cassandra/LICENSE.txt │
│ cctz │ Apache │ /contrib/cctz/LICENSE.txt │
│ cityhash102 │ MIT │ /contrib/cityhash102/COPYING │
│ cppkafka │ BSD 2-clause │ /contrib/cppkafka/LICENSE │
└────────────────────┴──────────────┴─────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) <!--hide-->

View File

@ -49,7 +49,7 @@ CurrentMetric_ReplicatedChecks: 0
**See also**
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md) — Contains a number of events that occurred.
- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.

View File

@ -1,25 +1,46 @@
# system.mutations {#system_tables-mutations}
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
**database**, **table** - The name of the database and table to which the mutation was applied.
Columns:
**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied.
**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
- `table` ([String](../../sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied.
**create\_time** - When this mutation command was submitted for execution.
- `mutation_id` ([String](../../sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For non-replicated tables the IDs correspond to file names in the data directory of the table.
**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
If there were problems with mutating some parts, the following columns contain additional information:
- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
**latest\_failed\_part** - The name of the most recent part that could not be mutated.
- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
**latest\_fail\_time** - The time of the most recent part mutation failure.
- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
- `1` if the mutation is completed,
- `0` if the mutation is still in process.
!!! info "Note"
Even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not completed yet because of a long-running `INSERT` query that will create a new data part that needs to be mutated.
If there were problems with mutating some data parts, the following columns contain additional information:
- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.
- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.
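For example, unfinished mutations and their most recent failures can be monitored with a query like this (a sketch):
``` sql
-- List mutations that are still in progress, newest first.
SELECT database, table, mutation_id, command, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0
ORDER BY create_time DESC;
```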
**See Also**
- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family

View File

@ -6,75 +6,151 @@ Each row describes one data part.
Columns:
- `partition` (String) The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.
- `partition` ([String](../../sql-reference/data-types/string.md)) The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.
Formats:
- `YYYYMM` for automatic partitioning by month.
- `any_string` when partitioning manually.
- `name` (`String`) Name of the data part.
- `name` ([String](../../sql-reference/data-types/string.md)) Name of the data part.
- `active` (`UInt8`) Flag that indicates whether the data part is active. If a data part is active, its used in a table. Otherwise, its deleted. Inactive data parts remain after merging.
- `part_type` ([String](../../sql-reference/data-types/string.md)) — The data part storing format.
- `marks` (`UInt64`) The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesnt work for adaptive granularity).
Possible Values:
- `rows` (`UInt64`) The number of rows.
- `Wide` — Each column is stored in a separate file in a filesystem.
- `Compact` — All columns are stored in one file in a filesystem.
- `bytes_on_disk` (`UInt64`) Total size of all the data part files in bytes.
Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.
- `data_compressed_bytes` (`UInt64`) Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) Flag that indicates whether the data part is active. If a data part is active, its used in a table. Otherwise, its deleted. Inactive data parts remain after merging.
- `data_uncompressed_bytes` (`UInt64`) Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesnt work for adaptive granularity).
- `marks_bytes` (`UInt64`) The size of the file with marks.
- `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) The number of rows.
- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
- `bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) Total size of all the data part files in bytes.
- `remove_time` (`DateTime`) The time when the data part became inactive.
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `refcount` (`UInt32`) The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `min_date` (`Date`) The minimum value of the date key in the data part.
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) The size of the file with marks.
- `max_date` (`Date`) The maximum value of the date key in the data part.
- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
- `min_time` (`DateTime`) The minimum value of the date and time key in the data part.
- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) The time when the data part became inactive.
- `max_time`(`DateTime`) The maximum value of the date and time key in the data part.
- `refcount` ([UInt32](../../sql-reference/data-types/int-uint.md)) The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
- `partition_id` (`String`) ID of the partition.
- `min_date` ([Date](../../sql-reference/data-types/date.md)) The minimum value of the date key in the data part.
- `min_block_number` (`UInt64`) The minimum number of data parts that make up the current part after merging.
- `max_date` ([Date](../../sql-reference/data-types/date.md)) The maximum value of the date key in the data part.
- `max_block_number` (`UInt64`) The maximum number of data parts that make up the current part after merging.
- `min_time` ([DateTime](../../sql-reference/data-types/datetime.md)) The minimum value of the date and time key in the data part.
- `level` (`UInt32`) Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
- `max_time`([DateTime](../../sql-reference/data-types/datetime.md)) The maximum value of the date and time key in the data part.
- `data_version` (`UInt64`) Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
- `partition_id` ([String](../../sql-reference/data-types/string.md)) ID of the partition.
- `primary_key_bytes_in_memory` (`UInt64`) The amount of memory (in bytes) used by primary key values.
- `min_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) The minimum number of data parts that make up the current part after merging.
- `primary_key_bytes_in_memory_allocated` (`UInt64`) The amount of memory (in bytes) reserved for primary key values.
- `max_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) The maximum number of data parts that make up the current part after merging.
- `is_frozen` (`UInt8`) Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesnt exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
- `level` ([UInt32](../../sql-reference/data-types/int-uint.md)) Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
- `database` (`String`) Name of the database.
- `data_version` ([UInt64](../../sql-reference/data-types/int-uint.md)) Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
- `table` (`String`) Name of the table.
- `primary_key_bytes_in_memory` ([UInt64](../../sql-reference/data-types/int-uint.md)) The amount of memory (in bytes) used by primary key values.
- `engine` (`String`) Name of the table engine without parameters.
- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) The amount of memory (in bytes) reserved for primary key values.
- `path` (`String`) Absolute path to the folder with data part files.
- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesnt exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
- `disk` (`String`) Name of a disk that stores the data part.
- `database` ([String](../../sql-reference/data-types/string.md)) Name of the database.
- `hash_of_all_files` (`String`) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
- `table` ([String](../../sql-reference/data-types/string.md)) Name of the table.
- `hash_of_uncompressed_files` (`String`) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
- `engine` ([String](../../sql-reference/data-types/string.md)) Name of the table engine without parameters.
- `uncompressed_hash_of_compressed_files` (`String`) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
- `path` ([String](../../sql-reference/data-types/string.md)) Absolute path to the folder with data part files.
- `bytes` (`UInt64`) Alias for `bytes_on_disk`.
- `disk` ([String](../../sql-reference/data-types/string.md)) Name of a disk that stores the data part.
- `marks_size` (`UInt64`) Alias for `marks_bytes`.
- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
!!! note "Warning"
The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields.
- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) Alias for `bytes_on_disk`.
- `marks_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) Alias for `marks_bytes`.
**Example**
``` sql
SELECT * FROM system.parts LIMIT 1 FORMAT Vertical;
```
``` text
Row 1:
──────
partition: tuple()
name: all_1_4_1_6
part_type: Wide
active: 1
marks: 2
rows: 6
bytes_on_disk: 310
data_compressed_bytes: 157
data_uncompressed_bytes: 91
marks_bytes: 144
modification_time: 2020-06-18 13:01:49
remove_time: 0000-00-00 00:00:00
refcount: 1
min_date: 0000-00-00
max_date: 0000-00-00
min_time: 0000-00-00 00:00:00
max_time: 0000-00-00 00:00:00
partition_id: all
min_block_number: 1
max_block_number: 4
level: 1
data_version: 6
primary_key_bytes_in_memory: 8
primary_key_bytes_in_memory_allocated: 64
is_frozen: 0
database: default
table: months
engine: MergeTree
disk_name: default
path: /var/lib/clickhouse/data/default/months/all_1_4_1_6/
hash_of_all_files: 2d0657a16d9430824d35e327fcbd87bf
hash_of_uncompressed_files: 84950cc30ba867c77a408ae21332ba29
uncompressed_hash_of_compressed_files: 1ad78f1c6843bbfb99a2c931abe7df7d
delete_ttl_info_min: 0000-00-00 00:00:00
delete_ttl_info_max: 0000-00-00 00:00:00
move_ttl_info.expression: []
move_ttl_info.min: []
move_ttl_info.max: []
```
**See Also**
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)

View File

@ -0,0 +1,16 @@
# system.role_grants {#system_tables-role_grants}
Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`.
Columns:
- `user_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — User name.
- `role_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Role name.
- `granted_role_name` ([String](../../sql-reference/data-types/string.md)) — Name of role granted to the `role_name` role. To grant one role to another one use `GRANT role1 TO role2`.
- `granted_role_is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a default role. Possible values:
- 1 — `granted_role` is a default role.
- 0 — `granted_role` is not a default role.
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a role with [ADMIN OPTION](../../sql-reference/statements/grant.md#admin-option-privilege) privilege. Possible values:
- 1 — The role has `ADMIN OPTION` privilege.
- 0 — The role does not have `ADMIN OPTION` privilege.
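As an illustration, granting a role and inspecting the resulting row (hypothetical role and user names):
``` sql
GRANT accountant TO mira; -- hypothetical names
SELECT * FROM system.role_grants WHERE user_name = 'mira';
```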
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) <!--hide-->

View File

@ -0,0 +1,10 @@
# system.roles {#system_tables-roles}
Contains information about configured [roles](../../operations/access-rights.md#role-management).
Columns:
- `name` ([String](../../sql-reference/data-types/string.md)) — Role name.
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Role ID.
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of roles. Configured in the `access_control_path` parameter.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->

View File

@ -16,12 +16,15 @@ By default `clickhouse-local` does not have access to data on the same host, but
!!! warning "Warning"
It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.
For temporary data, a unique temporary data directory is created by default. If you want to override this behavior, the data directory can be explicitly specified with the `-- --path` option.
## Usage {#usage}
Basic usage:
``` bash
$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" \
--query "query"
```
Arguments:
@ -40,10 +43,12 @@ Arguments:
Also there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.
## Examples {#examples}
``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
$ echo -e "1,2\n3,4" | clickhouse-local --structure "a Int64, b Int64" \
--input-format "CSV" --query "SELECT * FROM table"
Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
1 2
3 4
@ -52,16 +57,37 @@ Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
Previous example is the same as:
``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
$ echo -e "1,2\n3,4" | clickhouse-local --query "
CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin);
SELECT a, b FROM table;
DROP TABLE table"
Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
1 2
3 4
```
You don't have to use `stdin` or the `--file` argument, and can open any number of files using the [`file` table function](../../sql-reference/table-functions/file.md):
``` bash
$ echo 1 | tee 1.tsv
1
$ echo 2 | tee 2.tsv
2
$ clickhouse-local --query "
select * from file('1.tsv', TSV, 'a int') t1
cross join file('2.tsv', TSV, 'b int') t2"
1 2
```
Now lets output the memory usage for each Unix user:
``` bash
$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' \
| clickhouse-local --structure "user String, mem Float64" \
--query "SELECT user, round(sum(mem), 2) as memTotal
FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
```
``` text

View File

@ -54,8 +54,6 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con
When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
- Dictionaries from other sources are updated every time by default.

View File

@ -503,3 +503,34 @@ Supported modifiers for Format:
| %% | a % sign | % |
[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->
## FROM_UNIXTIME
When there is only a single argument of integer type, it acts in the same way as `toDateTime` and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.
For example:
```sql
SELECT FROM_UNIXTIME(423543535)
```
```text
┌─FROM_UNIXTIME(423543535)─┐
│ 1983-06-04 10:58:55 │
└──────────────────────────┘
```
When there are two arguments, the first of integer or DateTime type and the second a constant format string, it acts in the same way as `formatDateTime` and returns the `String` type.
For example:
```sql
SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
```
```text
┌─DateTime────────────┐
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```

View File

@ -1350,4 +1350,80 @@ len: 30
- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
## randomFixedString {#randomfixedstring}
Generates a binary string of the specified length filled with random bytes (including zero bytes).
**Syntax**
``` sql
randomFixedString(length);
```
**Parameters**
- `length` — String length in bytes. [UInt64](../../sql-reference/data-types/int-uint.md).
**Returned value(s)**
- String filled with random bytes.
Type: [FixedString](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
```sql
SELECT randomFixedString(13) as rnd, toTypeName(rnd)
```
Result:
```text
┌─rnd──────┬─toTypeName(randomFixedString(13))─┐
│ j▒h㋖HɨZ'▒ │ FixedString(13) │
└──────────┴───────────────────────────────────┘
```
## randomStringUTF8 {#randomstringutf8}
Generates a random string of a specified length. Result string contains valid UTF-8 code points. The value of code points may be outside of the range of assigned Unicode.
**Syntax**
``` sql
randomStringUTF8(length);
```
**Parameters**
- `length` — Required length of the resulting string in code points. [UInt64](../../sql-reference/data-types/int-uint.md).
**Returned value(s)**
- UTF-8 random string.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
```sql
SELECT randomStringUTF8(13)
```
Result:
```text
┌─randomStringUTF8(13)─┐
│ 𘤗𙉝д兠庇󡅴󱱎󦐪􂕌𔊹𓰛 │
└──────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) <!--hide-->

View File

@ -111,4 +111,43 @@ SELECT alphaTokens('abca1abc')
└─────────────────────────┘
```
## extractAllGroups(text, regexp) {#extractallgroups}
Extracts all groups from non-overlapping substrings matched by a regular expression.
**Syntax**
``` sql
extractAllGroups(text, regexp)
```
**Parameters**
- `text` — [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `regexp` — Regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned values**
- If the function finds at least one matching group, it returns an `Array(Array(String))` column, clustered by group_id (1 to N, where N is the number of capturing groups in `regexp`).
- If there is no matching group, returns an empty array.
Type: [Array](../data-types/array.md).
**Example**
Query:
``` sql
SELECT extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)');
```
Result:
``` text
┌─extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)')─┐
│ [['abc','123'],['8','"hkl"']] │
└───────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) <!--hide-->

View File

@ -7,14 +7,14 @@ toc_title: VIEW
Creates a new view. There are two types of views: normal and materialized.
## Normal {#normal}
Syntax:
``` sql
CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ...
```
## Normal {#normal}
Normal views dont store any data; they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
As an example, assume youve created a view:
@ -37,6 +37,11 @@ SELECT a, b, c FROM (SELECT ...)
## Materialized {#materialized}
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
```
Materialized views store data transformed by the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query.
When creating a materialized view without `TO [db].[table]`, you must specify `ENGINE`, the table engine for storing data.
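A minimal sketch, assuming a source table `hits` with a `date` column; the aggregated data is stored by the view's own engine:
``` sql
CREATE MATERIALIZED VIEW hits_per_day
ENGINE = SummingMergeTree() ORDER BY date
POPULATE
AS SELECT date, count() AS hits FROM hits GROUP BY date;
```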

View File

@ -1,5 +1,6 @@
---
toc_priority: 46
toc_title: DROP
---
# DROP Statements {#drop}
@ -77,3 +78,11 @@ DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
Deletes a settings profile.
The deleted settings profile is revoked from all the entities where it was assigned.
## DROP VIEW {#drop-view}
``` sql
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
Deletes a view. Views can be deleted by a `DROP TABLE` command as well, but `DROP VIEW` checks that `[db.]name` is a view.
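For example (hypothetical name):
``` sql
DROP VIEW IF EXISTS db.hits_per_day;
```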

View File

@ -1,5 +1,6 @@
---
toc_priority: 47
toc_title: EXISTS
---
# EXISTS Statement {#exists-statement}

View File

@ -1,8 +1,9 @@
---
toc_priority: 48
toc_title: KILL
---
## KILL Statements {#kill-statements}
# KILL Statements {#kill-statements}
There are two kinds of kill statements: to kill a query and to kill a mutation.

View File

@ -1,5 +1,6 @@
---
toc_priority: 49
toc_title: OPTIMIZE
---
# OPTIMIZE Statement {#misc_operations-optimize}

View File

@ -1,5 +1,6 @@
---
toc_priority: 50
toc_title: RENAME
---
# RENAME Statement {#misc_operations-rename}

View File

@ -11,7 +11,7 @@ Syntax:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -33,17 +33,13 @@ Additional join types available in ClickHouse:
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
Modifies how matching by “join keys” is performed
- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Setting {#join-settings}
!!! note "Note"
The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
The default join type can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
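As an illustration, the bare `JOIN` below is interpreted according to the configured default strictness (a sketch with hypothetical tables `t1` and `t2`):
``` sql
SET join_default_strictness = 'ANY';
-- Equivalent to ANY INNER JOIN under this setting.
SELECT * FROM t1 JOIN t2 USING (id);
```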

View File

@ -1,5 +1,6 @@
---
toc_priority: 52
toc_title: SET ROLE
---
# SET ROLE Statement {#set-role-statement}

View File

@ -1,5 +1,6 @@
---
toc_priority: 51
toc_title: SET
---
# SET Statement {#query-set}

View File

@ -115,7 +115,7 @@ Aborts ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`)
## Managing Distributed Tables {#query-language-system-distributed}
ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync) setting.
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}

View File

@ -1,5 +1,6 @@
---
toc_priority: 53
toc_title: TRUNCATE
---
# TRUNCATE Statement {#truncate-statement}

View File

@ -1,5 +1,6 @@
---
toc_priority: 54
toc_title: USE
---
# USE Statement {#use}

View File

@ -23,7 +23,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statemen
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.

View File

@ -12,7 +12,7 @@ Syntax:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,14 +34,10 @@ Additional join types available in ClickHouse:
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
Modifies how matching by “join keys” is performed
- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Setting {#join-settings}
!!! note "Note"
The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

View File

@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.

View File

@ -12,7 +12,7 @@ machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,15 +34,12 @@ FROM <left_table>
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
## Setting {#join-settings}
Modifies how matching by “join keys” is performed
- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
!!! note "Note"
The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

View File

@ -23,7 +23,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/stat
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.

View File

@ -12,7 +12,7 @@ Syntax:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,14 +34,10 @@ Additional join types available in ClickHouse:
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
Modifies how matching by “join keys” is performed
- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Setting {#join-settings}
!!! note "Note"
The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

View File

@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Join type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.

View File

@ -693,6 +693,7 @@ auto s = std::string{"Hello"};
## Error messages {#error-messages}
Error messages are part of a program's user interface, intended to let the user:
* notice error conditions,
* understand their meaning and causes,
* fix them.
@ -700,6 +701,7 @@ auto s = std::string{"Hello"};
The form and content of error messages should serve these goals.
There are two main kinds of errors:
* user or system errors,
* internal program errors.
@ -722,6 +724,7 @@ While processing '(SELECT 2 AS a)'.
The dictionary is configured incorrectly.
```
This message does not make it clear:
- which dictionary?
- what exactly is wrong with its configuration?
@ -735,12 +738,14 @@ The dictionary is configured incorrectly.
Such an error always indicates a bug in the program. The user cannot fix it on their own and must report it to the developers.
There are two main ways to check for such errors:
* An exception with the code `LOGICAL_ERROR`. It can be used for important checks that are performed even in release builds.
* `assert`. Such conditions are not checked in release builds; they can be used for heavy or optional checks.
An example of a message that should have the code `LOGICAL_ERROR`:
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
What signs show that this message is about an internal program error?
* the message mentions internal entities from the code,
* the message says it's a bug,
* the user's immediate actions cannot fix the error. We expect the user to report it as a bug, and we will fix it in the code.
@ -752,6 +757,7 @@ The dictionary is configured incorrectly.
### How to add a new error message? {#error-messages-add}
When adding an error message:
1. Describe what happened in the user's terms, not in pieces of code.
2. Add as much context as possible (what it happened with, when, why, etc.).
3. Add the typical causes.

View File

@ -67,8 +67,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
- `min_merge_bytes_to_use_direct_io` — the minimum data volume of a merge operation that is required for using direct (unbuffered) I/O when reading from or writing to disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If it exceeds `min_bytes_to_use_direct_io` bytes, ClickHouse reads the data from disk using the `O_DIRECT` flag. If `min_merge_bytes_to_use_direct_io = 0`, direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
- <a name="mergetree_setting-merge_with_ttl_timeout"></a>`merge_with_ttl_timeout` — the minimum time in seconds before repeating a merge with TTL. Default: 86400 (1 day).
- `write_final_mark` — enables or disables writing the final index mark at the end of a data part, pointing past the last byte. Default: 1. Don't turn it off.
- `merge_max_block_size` — The maximum number of rows in a block for merge operations. Default value: 8192.
- `merge_max_block_size` — the maximum number of rows in a block for merge operations. Default value: 8192.
- `storage_policy` — the data storage policy. See [Storing Table Data on Multiple Block Devices](#table_engine-mergetree-multiple-volumes).
- `min_bytes_for_wide_part`, `min_rows_for_wide_part` — the minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one of these settings, both, or neither. See [Data Storage](#mergetree-data-storage).
**Example of setting the sections**
@ -123,6 +124,10 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
Data belonging to different partitions is split into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key end up in the same data part.
Data parts can be stored in `Wide` or `Compact` format. In `Wide` format each column is stored in a separate file, while in `Compact` format all columns are stored in one file. The `Compact` format can improve performance when data is inserted frequently and in small volumes.
The storage format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` engine settings. If the number of bytes or rows in a data part is less than the corresponding setting's value, the part is stored in `Compact` format. Otherwise it is stored in `Wide` format. If neither of these settings is set, data parts are stored in `Wide` format.
Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse does not split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for that row (the mark). For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it is in the primary key or not, ClickHouse also stores the same marks. These marks are used to locate data directly in the column files.
The granule size is restricted by the `index_granularity` and `index_granularity_bytes` engine settings (see the sketch below). The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
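These settings come together in a table definition. A minimal sketch, with a hypothetical table and illustrative values:

``` sql
CREATE TABLE hits_sketch
(
    `EventDate` Date,
    `UserID` UInt64
)
ENGINE = MergeTree()
ORDER BY (EventDate, UserID)
SETTINGS index_granularity = 8192,    -- at most 8192 rows per granule
    min_bytes_for_wide_part = 0;      -- store every part in the Wide format
```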

View File

@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**Engine Parameters**
- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` – [JOIN type](../../../engines/table-engines/special/join.md#select-join-types).
- `k1[, k2, ...]` – the key columns from the `USING` clause that the `JOIN` operation is performed with.
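A sketch of how these parameters fit together (the table and column names are illustrative):

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8)
ENGINE = Join(ANY, LEFT, id);

-- The stored right-hand side can then be joined to:
SELECT * FROM (SELECT toUInt32(number) AS id FROM numbers(3)) AS t
ANY LEFT JOIN id_val_join USING (id);
```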

View File

@ -385,12 +385,37 @@ ClickHouse checks the `min_part_size` and `min_part_size_rat
**Additional information**
On hosts with a small amount of RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio > 1`.
The default value of `max_server_memory_usage` is calculated as `memory_amount * max_server_memory_usage_to_ram_ratio`.
**See also**
- [max_memory_usage](../settings/query-complexity.md#settings_max_memory_usage)
## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}
Defines the fraction of the total physical RAM that is available to the ClickHouse server. If the server tries to use more, its memory is limited to the calculated value.
Possible values:
- A positive floating-point number.
- 0 — the ClickHouse server can use all available RAM.
Default value: `0`.
**Usage**
On hosts with a small amount of RAM and swap, you may need to set `max_server_memory_usage_to_ram_ratio` to a value greater than 1.
**Example**
``` xml
<max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
```
**See also**
- [max_server_memory_usage](#max_server_memory_usage)
## max\_connections {#max-connections}
The maximum number of incoming connections.

View File

@ -520,6 +520,31 @@ ClickHouse uses this parameter when reading d
Default value: 0.
## network_compression_method {#network_compression_method}
Sets the data compression method used for communication between servers and between the server and [clickhouse-client](../../interfaces/cli.md).
Possible values:
- `LZ4` — sets the LZ4 compression method.
- `ZSTD` — sets the ZSTD compression method.
Default value: `LZ4`.
See also:
- [network_zstd_compression_level](#network_zstd_compression_level)
## network_zstd_compression_level {#network_zstd_compression_level}
Adjusts the level of ZSTD compression. Used only when [network_compression_method](#network_compression_method) is set to `ZSTD`.
Possible values:
- A positive integer from 1 to 15.
Default value: `1`.
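For example, both settings can be changed per session (the level here is illustrative):

``` sql
SET network_compression_method = 'ZSTD';
SET network_zstd_compression_level = 5;
```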
## log\_queries {#settings-log-queries}
Enables query logging.
@ -700,6 +725,17 @@ log_query_threads=1
Default value: 50.
## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}
The wait time for a connection, in milliseconds, when the connection pool is full.
Possible values:
- A positive integer.
- 0 — infinite timeout.
Default value: 0.
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
The maximum number of connection attempts to each replica for the Distributed table engine.
@ -711,6 +747,21 @@ log_query_threads=1
Whether to count extreme values (the minimums and maximums over columns of the query result). Accepts 0 or 1. Disabled by default (0).
For details, see the section “Extreme values”.
## kafka\_max\_wait\_ms {#kafka-max-wait-ms}
The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retrying.
Possible values:
- A positive integer.
- 0 — infinite timeout.
Default value: 5000.
See also:
- [Apache Kafka](https://kafka.apache.org/)
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
Whether to use the cache of uncompressed blocks. Accepts 0 or 1. Disabled by default (0).
@ -730,6 +781,17 @@ log_query_threads=1
This setting, when set to 1, is used in Yandex.Metrica to implement suggestions for segmentation conditions: after the next character is entered, if the old query hasn't finished yet, it should be cancelled.
## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}
The wait time for the query with the same `query_id` to finish, when the [replace_running_query](#replace-running-query) setting is active.
Possible values:
- A positive integer.
- 0 — throwing an exception that does not allow running the new query if the server is already executing a query with the same `query_id`.
Default value: 5000.
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
@ -1216,6 +1278,34 @@ Default value: 0.
Default value: 16.
## insert_distributed_sync {#insert_distributed_sync}
Enables or disables synchronous data insertion into distributed tables (tables with the [Distributed](../../engines/table-engines/special/distributed.md#distributed) engine).
By default, ClickHouse inserts data into a distributed table asynchronously. If `insert_distributed_sync=1`, the data is inserted synchronously, and the `INSERT` query is considered successful once the data has been written to all shards (to at least one replica per shard if `internal_replication = true`); see the sketch after this list.
Possible values:
- 0 — data is inserted asynchronously.
- 1 — data is inserted synchronously.
Default value: `0`.
**See also**
- [Distributed engine](../../engines/table-engines/special/distributed.md#distributed)
- [Managing distributed tables](../../sql-reference/statements/system.md#query-language-system-distributed)
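A sketch of a synchronous insert (`dist_table` is a hypothetical Distributed table):

``` sql
SET insert_distributed_sync = 1;
-- Returns only after the data has been written to every shard.
INSERT INTO dist_table VALUES (1, 'a'), (2, 'b');
```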
## validate\_polygons {#validate_polygons}
Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon) function if the polygon is self-intersecting or self-tangent; see the example below.
Possible values:
- 0 — throwing an exception is disabled. `pointInPolygon` accepts invalid polygons and may return incorrect results for them.
- 1 — throwing an exception is enabled.
Default value: 1.
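For example, with the check disabled, a self-intersecting “bowtie” polygon is accepted (the coordinates are illustrative); with `validate_polygons = 1` the same query throws an exception:

``` sql
SET validate_polygons = 0;
SELECT pointInPolygon((5., 5.), [(0., 0.), (10., 10.), (10., 0.), (0., 10.)]);
```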
## always_fetch_merged_part {#always_fetch_merged_part}
Prohibits merging data for tables of the [Replicated*MergeTree](../../engines/table-engines/mergetree-family/replication.md) family.

View File

@ -46,6 +46,41 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [system.events](#system_tables-events) — a table with the number of events that have occurred.
- [system.metric\_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.
## system.asynchronous\_metric\_log {#system-tables-async-log}
Contains the historical values of the metrics from the `system.asynchronous_metrics` table, saved once per minute. Enabled by default.
Columns:
- `event_date` ([Date](../sql-reference/data-types/date.md)) — event date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — event time.
- `name` ([String](../sql-reference/data-types/string.md)) — metric name.
- `value` ([Float64](../sql-reference/data-types/float.md)) — metric value.
**Example**
``` sql
SELECT * FROM system.asynchronous_metric_log LIMIT 10
```
``` text
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬────value─┐
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty │ 4214 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs │ 0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained │ 17657856 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped │ 71471104 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident │ 61538304 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata │ 6199264 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated │ 38074336 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch │ 2 │
└────────────┴─────────────────────┴──────────────────────────────────────────┴──────────┘
```
**See also**
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md) — contains metrics that are calculated periodically in the background.
- [system.metric\_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.
## system.clusters {#system-clusters}
Contains information about the clusters available in the config file and the servers in them.
@ -126,6 +161,44 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
└──────────────────┘
```
## system.licenses {#system-tables_system.licenses}
Contains information about the licenses of third-party libraries located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources.
Columns:
- `library_name` ([String](../sql-reference/data-types/string.md)) — the name of the library the license belongs to.
- `license_type` ([String](../sql-reference/data-types/string.md)) — the license type, for example, Apache or MIT.
- `license_path` ([String](../sql-reference/data-types/string.md)) — the path to the file with the license text.
- `license_text` ([String](../sql-reference/data-types/string.md)) — the license text.
**Example**
``` sql
SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
```
``` text
┌─library_name───────┬─license_type─┬─license_path────────────────────────┐
│ FastMemcpy │ MIT │ /contrib/FastMemcpy/LICENSE │
│ arrow │ Apache │ /contrib/arrow/LICENSE.txt │
│ avro │ Apache │ /contrib/avro/LICENSE.txt │
│ aws-c-common │ Apache │ /contrib/aws-c-common/LICENSE │
│ aws-c-event-stream │ Apache │ /contrib/aws-c-event-stream/LICENSE │
│ aws-checksums │ Apache │ /contrib/aws-checksums/LICENSE │
│ aws │ Apache │ /contrib/aws/LICENSE.txt │
│ base64 │ BSD 2-clause │ /contrib/base64/LICENSE │
│ boost │ Boost │ /contrib/boost/LICENSE_1_0.txt │
│ brotli │ MIT │ /contrib/brotli/LICENSE │
│ capnproto │ MIT │ /contrib/capnproto/LICENSE │
│ cassandra │ Apache │ /contrib/cassandra/LICENSE.txt │
│ cctz │ Apache │ /contrib/cctz/LICENSE.txt │
│ cityhash102 │ MIT │ /contrib/cityhash102/COPYING │
│ cppkafka │ BSD 2-clause │ /contrib/cppkafka/LICENSE │
└────────────────────┴──────────────┴─────────────────────────────────────┘
```
## system.databases {#system-databases}
The table contains a single String column called `name` — the name of the database.
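For example:

``` sql
SELECT name FROM system.databases;
```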
@ -433,78 +506,154 @@ CurrentMetric_ReplicatedChecks: 0
Columns:
- `partition` (`String`) – the partition name. To learn what a partition is, see the description of the [ALTER](../sql-reference/statements/alter.md#query_language_queries_alter) query.
- `partition` ([String](../sql-reference/data-types/string.md)) – the partition name. To learn what a partition is, see the description of the [ALTER](../sql-reference/statements/alter.md#query_language_queries_alter) query.
Formats:
- `YYYYMM` for automatic partitioning by month.
- `any_string` when partitioning manually.
- `name` (`String`) – the name of the data part.
- `name` ([String](../sql-reference/data-types/string.md)) – the name of the data part.
- `active` (`UInt8`) – the flag that indicates whether the data part is active. If a data part is active, it is used by the table; otherwise it will be deleted. Inactive data parts remain after merges.
- `part_type` ([String](../sql-reference/data-types/string.md)) — the data storage format.
- `marks` (`UInt64`) – the number of marks. To get the approximate number of rows in the data part, multiply `marks` by the index granularity (usually 8192).
Possible values:
- `rows` (`UInt64`) – the number of rows.
- `Wide` — each column is stored in a separate file.
- `Compact` — all columns are stored in one file.
- `bytes_on_disk` (`UInt64`) – the total size of all the data part files in bytes.
The data storage format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) table.
- `data_compressed_bytes` (`UInt64`) – the total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `active` ([UInt8](../sql-reference/data-types/int-uint.md)) – the flag that indicates whether the data part is active. If a data part is active, it is used by the table; otherwise it will be deleted. Inactive data parts remain after merges.
- `data_uncompressed_bytes` (`UInt64`) – the total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `marks` ([UInt64](../sql-reference/data-types/int-uint.md)) – the number of marks. To get the approximate number of rows in the data part, multiply `marks` by the index granularity (usually 8192).
- `marks_bytes` (`UInt64`) – the size of the file with marks.
- `rows` ([UInt64](../sql-reference/data-types/int-uint.md)) – the number of rows.
- `modification_time` (`DateTime`) – the time the directory with the data part was modified. This usually corresponds to the time when the data part was created.
- `bytes_on_disk` ([UInt64](../sql-reference/data-types/int-uint.md)) – the total size of all the data part files in bytes.
- `remove_time` (`DateTime`) – the time when the data part became inactive.
- `data_compressed_bytes` ([UInt64](../sql-reference/data-types/int-uint.md)) – the total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `refcount` (`UInt32`) – the number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
- `data_uncompressed_bytes` ([UInt64](../sql-reference/data-types/int-uint.md)) – the total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
- `min_date` (`Date`) – the minimum value of the date key in the data part.
- `marks_bytes` ([UInt64](../sql-reference/data-types/int-uint.md)) – the size of the file with marks.
- `max_date` (`Date`) – the maximum value of the date key in the data part.
- `modification_time` ([DateTime](../sql-reference/data-types/datetime.md)) – the time the directory with the data part was modified. This usually corresponds to the time when the data part was created.
- `min_time` (`DateTime`) – the minimum value of the date-and-time key in the data part.
- `remove_time` ([DateTime](../sql-reference/data-types/datetime.md)) – the time when the data part became inactive.
- `max_time`(`DateTime`) – the maximum value of the date-and-time key in the data part.
- `refcount` ([UInt32](../sql-reference/data-types/int-uint.md)) – the number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
- `partition_id` (`String`) – the ID of the partition.
- `min_date` ([Date](../sql-reference/data-types/date.md)) – the minimum value of the date key in the data part.
- `min_block_number` (`UInt64`) – the minimum number of the data parts that make up the current part after merging.
- `max_date` ([Date](../sql-reference/data-types/date.md)) – the maximum value of the date key in the data part.
- `max_block_number` (`UInt64`) – the maximum number of the data parts that make up the current part after merging.
- `min_time` ([DateTime](../sql-reference/data-types/datetime.md)) – the minimum value of the date-and-time key in the data part.
- `level` (`UInt32`) - the depth of the merge tree. If there were no merges, `level=0`.
- `max_time`([DateTime](../sql-reference/data-types/datetime.md)) – the maximum value of the date-and-time key in the data part.
- `data_version` (`UInt64`) – the number that is used to determine which mutations should be applied to the data part (mutations with a version greater than `data_version`).
- `partition_id` ([String](../sql-reference/data-types/string.md)) – the ID of the partition.
- `primary_key_bytes_in_memory` (`UInt64`) – the amount of memory (in bytes) occupied by primary key values.
- `min_block_number` ([UInt64](../sql-reference/data-types/int-uint.md)) – the minimum number of the data parts that make up the current part after merging.
- `primary_key_bytes_in_memory_allocated` (`UInt64`) – the amount of memory (in bytes) reserved for primary key values.
- `max_block_number` ([UInt64](../sql-reference/data-types/int-uint.md)) – the maximum number of the data parts that make up the current part after merging.
- `is_frozen` (`UInt8`) – the flag that shows whether a partition backup exists. 1 — the backup exists; 0 — it does not. For details, see [FREEZE PARTITION](../sql-reference/statements/alter.md#alter_freeze-partition).
- `level` ([UInt32](../sql-reference/data-types/int-uint.md)) - the depth of the merge tree. If there were no merges, `level=0`.
- `database` (`String`) – the name of the database.
- `data_version` ([UInt64](../sql-reference/data-types/int-uint.md)) – the number that is used to determine which mutations should be applied to the data part (mutations with a version greater than `data_version`).
- `table` (`String`) – the name of the table.
- `primary_key_bytes_in_memory` ([UInt64](../sql-reference/data-types/int-uint.md)) – the amount of memory (in bytes) occupied by primary key values.
- `engine` (`String`) – the name of the table engine, without parameters.
- `primary_key_bytes_in_memory_allocated` ([UInt64](../sql-reference/data-types/int-uint.md)) – the amount of memory (in bytes) reserved for primary key values.
- `path` (`String`) – the absolute path to the folder with the data part files.
- `is_frozen` ([UInt8](../sql-reference/data-types/int-uint.md)) – the flag that shows whether a partition backup exists. 1 — the backup exists; 0 — it does not. For details, see [FREEZE PARTITION](../sql-reference/statements/alter.md#alter_freeze-partition).
- `disk` (`String`) – the name of the disk that stores the data part.
- `database` ([String](../sql-reference/data-types/string.md)) – the name of the database.
- `hash_of_all_files` (`String`) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the compressed files.
- `table` ([String](../sql-reference/data-types/string.md)) – the name of the table.
- `hash_of_uncompressed_files` (`String`) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the uncompressed files (files with marks, the primary key file, etc.).
- `engine` ([String](../sql-reference/data-types/string.md)) – the name of the table engine, without parameters.
- `uncompressed_hash_of_compressed_files` (`String`) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the data in the compressed files as if they were uncompressed.
- `path` ([String](../sql-reference/data-types/string.md)) – the absolute path to the folder with the data part files.
- `bytes` (`UInt64`) – an alias for `bytes_on_disk`.
- `disk` ([String](../sql-reference/data-types/string.md)) – the name of the disk that stores the data part.
- `marks_size` (`UInt64`) – an alias for `marks_bytes`.
- `hash_of_all_files` ([String](../sql-reference/data-types/string.md)) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the compressed files.
- `hash_of_uncompressed_files` ([String](../sql-reference/data-types/string.md)) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the uncompressed files (files with marks, the primary key file, etc.).
- `uncompressed_hash_of_compressed_files` ([String](../sql-reference/data-types/string.md)) – the [sipHash128](../sql-reference/functions/hash-functions.md#hash_functions-siphash128) value of the data in the compressed files as if they were uncompressed.
- `delete_ttl_info_min` ([DateTime](../sql-reference/data-types/datetime.md)) — the minimum value of the date-and-time key for the [TTL DELETE](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) rule.
- `delete_ttl_info_max` ([DateTime](../sql-reference/data-types/datetime.md)) — the maximum value of the date-and-time key for the [TTL DELETE](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) rule.
- `move_ttl_info.expression` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — an array of expressions. Each expression defines a [TTL MOVE](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) rule.
!!! note "Warning"
    The `move_ttl_info.expression` array is kept mostly for backward compatibility. To work with `TTL MOVE` rules, it is better to use the `move_ttl_info.min` and `move_ttl_info.max` fields.
- `move_ttl_info.min` ([Array](../sql-reference/data-types/array.md)([DateTime](../sql-reference/data-types/datetime.md))) — an array of values. Each element defines the minimum value of the date-and-time key for a [TTL MOVE](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) rule.
- `move_ttl_info.max` ([Array](../sql-reference/data-types/array.md)([DateTime](../sql-reference/data-types/datetime.md))) — an array of values. Each element defines the maximum value of the date-and-time key for a [TTL MOVE](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) rule.
- `bytes` ([UInt64](../sql-reference/data-types/int-uint.md)) – an alias for `bytes_on_disk`.
- `marks_size` ([UInt64](../sql-reference/data-types/int-uint.md)) – an alias for `marks_bytes`.
**Example**
``` sql
SELECT * FROM system.parts LIMIT 1 FORMAT Vertical;
```
``` text
Row 1:
──────
partition: tuple()
name: all_1_4_1_6
part_type: Wide
active: 1
marks: 2
rows: 6
bytes_on_disk: 310
data_compressed_bytes: 157
data_uncompressed_bytes: 91
marks_bytes: 144
modification_time: 2020-06-18 13:01:49
remove_time: 0000-00-00 00:00:00
refcount: 1
min_date: 0000-00-00
max_date: 0000-00-00
min_time: 0000-00-00 00:00:00
max_time: 0000-00-00 00:00:00
partition_id: all
min_block_number: 1
max_block_number: 4
level: 1
data_version: 6
primary_key_bytes_in_memory: 8
primary_key_bytes_in_memory_allocated: 64
is_frozen: 0
database: default
table: months
engine: MergeTree
disk_name: default
path: /var/lib/clickhouse/data/default/months/all_1_4_1_6/
hash_of_all_files: 2d0657a16d9430824d35e327fcbd87bf
hash_of_uncompressed_files: 84950cc30ba867c77a408ae21332ba29
uncompressed_hash_of_compressed_files: 1ad78f1c6843bbfb99a2c931abe7df7d
delete_ttl_info_min: 0000-00-00 00:00:00
delete_ttl_info_max: 0000-00-00 00:00:00
move_ttl_info.expression: []
move_ttl_info.min: []
move_ttl_info.max: []
```
**See also**
- [MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md)
- [TTL for columns and tables](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
## system.part\_log {#system_tables-part-log}
@ -1210,29 +1359,50 @@ path: /clickhouse/tables/01-08/visits/replicas
## system.mutations {#system_tables-mutations}
The table contains information about the progress of [mutations](../sql-reference/statements/alter.md#alter-mutations) of MergeTree tables. Each mutation command is represented by a single row. The table has the following columns:
The table contains information about the progress of [mutations](../sql-reference/statements/alter.md#alter-mutations) of MergeTree-family tables. Each mutation command is represented by a single row.
**database**, **table** - the names of the database and the table the mutation was applied to.
Columns:
**mutation\_id** - the ID of the query. For replicated tables these IDs correspond to the names of the entries in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper; for non-replicated tables, to file names in the data directory of the table.
- `database` ([String](../sql-reference/data-types/string.md)) — the name of the database the mutation was applied to.
**command** - the mutation command (the part of the query after `ALTER TABLE [db.]table`).
- `table` ([String](../sql-reference/data-types/string.md)) — the name of the table the mutation was applied to.
**create\_time** - the time when the mutation was created.
- `mutation_id` ([String](../sql-reference/data-types/string.md)) — the ID of the query. For replicated tables these IDs correspond to the names of the entries in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper; for non-replicated tables, to file names in the data directory of the table.
**block\_numbers.partition\_id**, **block\_numbers.number** - a Nested column. For mutations of replicated tables, it contains the block number obtained by the mutation for each partition (in each partition, only the parts that contain blocks with numbers less than the number obtained by the mutation in that partition will be changed). For non-replicated tables, block numbering is continuous across partitions, so the column contains a single record with a single block number obtained by the mutation.
- `command` ([String](../sql-reference/data-types/string.md)) — the mutation command (the part of the query after `ALTER TABLE [db.]table`).
**parts\_to\_do** - the number of data parts of the table that still need to be changed.
- `create_time` ([Datetime](../sql-reference/data-types/datetime.md)) — the date and time when the mutation was created.
**is\_done** - whether the mutation is done. Note: even if `parts_to_do = 0`, for a replicated table a mutation may not be done yet because of a long-running insert that adds data that will need to be mutated.
- `block_numbers.partition_id` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — for mutations of replicated tables, the array contains the partition IDs (one record per partition). For mutations of non-replicated tables, the array is empty.
- `block_numbers.number` ([Array](../sql-reference/data-types/array.md)([Int64](../sql-reference/data-types/int-uint.md))) — for mutations of replicated tables, the array contains one record per partition, with the block number obtained by the mutation. In each partition, only the parts that contain blocks with numbers less than this number will be changed.
For non-replicated tables, block numbering is continuous across partitions. So the array contains a single record with the block number obtained by the mutation.
- `parts_to_do_names` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — an array of the names of the data parts that need to be changed for the mutation to finish.
- `parts_to_do` ([Int64](../sql-reference/data-types/int-uint.md)) — the number of data parts that need to be changed for the mutation to finish.
- `is_done` ([UInt8](../sql-reference/data-types/int-uint.md)) — the flag that shows whether the mutation is done. Possible values:
- `1` — the mutation is done,
- `0` — the mutation is still in progress.
!!! info "Note"
    Even if `parts_to_do = 0`, for a replicated table a mutation may not be done yet because of a long-running `INSERT` operation that adds data that will need to be mutated.
If there were problems with mutating some data parts, the following columns contain additional information (see the query sketch after this list):
**latest\_failed\_part** - the name of the most recent part that could not be mutated.
- `latest_failed_part` ([String](../sql-reference/data-types/string.md)) — the name of the most recent part that could not be mutated.
**latest\_fail\_time** — the time of the most recent mutation failure.
- `latest_fail_time` ([Datetime](../sql-reference/data-types/datetime.md)) — the date and time of the most recent mutation failure.
**latest\_fail\_reason** — the reason for the most recent mutation failure.
- `latest_fail_reason` ([String](../sql-reference/data-types/string.md)) — the reason for the most recent mutation failure.
**See also**
- [Mutations](../sql-reference/statements/alter.md#alter-mutations)
- [MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md)
- [Data replication](../engines/table-engines/mergetree-family/replication.md) (the ReplicatedMergeTree family)
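A typical way to look for unfinished or failed mutations might be the following sketch:

``` sql
SELECT database, table, mutation_id, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```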
## system.disks {#system_tables-disks}
@ -1261,10 +1431,56 @@ Contains information about disks defined in the [co
If a storage policy contains more than one volume, each volume has its own record in the table.
## system.roles {#system_tables-roles}
Contains information about [roles](../operations/access-rights.md#role-management).
Columns:
- `name` ([String](../sql-reference/data-types/string.md)) — the role name.
- `id` ([UUID](../sql-reference/data-types/uuid.md)) — the role ID.
- `storage` ([String](../sql-reference/data-types/string.md)) — the path to the role storage. Configured by the `access_control_path` parameter.
## system.role_grants {#system_tables-role_grants}
Contains the role [grants](../sql-reference/statements/grant.md) for users and roles. To add entries to this table, use the `GRANT role TO user` command; see the sketch after this list.
Columns:
- `user_name` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — the user name.
- `role_name` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — the role name.
- `granted_role_name` ([String](../sql-reference/data-types/string.md)) — the name of the role granted to the `role_name` role. To grant one role to another, use `GRANT role1 TO role2`.
- `granted_role_is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether `granted_role` is a default role. Possible values:
- 1 — `granted_role` is a default role.
- 0 — `granted_role` is not a default role.
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether the `granted_role` role has the `ADMIN OPTION` privilege. Possible values:
- 1 — the role has the `ADMIN OPTION` privilege.
- 0 — the role does not have the `ADMIN OPTION` privilege.
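A sketch of granting a role and inspecting the result (the role and user names are hypothetical and must exist beforehand):

``` sql
GRANT accountant TO mira;
SELECT user_name, granted_role_name, with_admin_option
FROM system.role_grants;
```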
## system.current_roles {#system_tables-current_roles}
Contains the active roles of the current user. `SET ROLE` changes the contents of this table.
Columns:
- `role_name` ([String](../sql-reference/data-types/string.md)) — the role name.
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether the `current_role` role has the `ADMIN OPTION` privilege.
- `is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether `current_role` is a default role.
## system.enabled_roles {#system_tables-enabled_roles}
Contains all the roles that are active at the moment, including the current role of the current user and the roles granted to the current role.
Columns:
- `role_name` ([String](../sql-reference/data-types/string.md)) — the role name.
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether the `enabled_role` role has the `ADMIN OPTION` privilege.
- `is_current` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether `enabled_role` is the current role of the current user.
- `is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether `enabled_role` is a default role.
## system.quotas {#system_tables-quotas}
Contains information about [quotas](quotas.md).
Columns:
- `name` ([String](../sql-reference/data-types/string.md)) — the quota name.
- `id` ([UUID](../sql-reference/data-types/uuid.md)) — the quota ID.
- `storage`([String](../sql-reference/data-types/string.md)) — the quota storage. Possible values: “users.xml” if the quota is defined in the users.xml file, “disk” if the quota is defined by an SQL query.
@ -1286,6 +1502,7 @@ Contains information about disks defined in the [co
Contains information about the maximums for all intervals of all quotas. Any number of rows, including zero, can correspond to one quota.
Columns:
- `quota_name` ([String](../sql-reference/data-types/string.md)) — the quota name.
- `duration` ([UInt32](../sql-reference/data-types/int-uint.md)) — the length of the time interval for calculating resource consumption, in seconds.
- `is_randomized_interval` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — a boolean value. It shows whether the interval is randomized. If an interval is not randomized, it always starts at the same time. For example, an interval of 1 minute always starts at a whole number of minutes (i.e. it can start at 11:20:00 but never at 11:20:01), and an interval of one day always starts at midnight UTC. If the interval is randomized, the very first interval starts at a random time, and the subsequent intervals start one after another. Values:
@ -1303,6 +1520,7 @@ Contains information about disks defined in the [co
Quota usage by the current user: how much is used and how much is left.
Columns:
- `quota_name` ([String](../sql-reference/data-types/string.md)) — the quota name.
- `quota_key`([String](../sql-reference/data-types/string.md)) — the key value. For example, if keys = `ip_address`, `quota_key` may have the value ‘192.168.1.1’.
- `start_time`([Nullable](../sql-reference/data-types/nullable.md)([DateTime](../sql-reference/data-types/datetime.md))) — the start time for calculating resource consumption.
@ -1327,6 +1545,7 @@ Contains information about disks defined in the [co
Quota usage by all users (see the query sketch after this list).
Columns:
- `quota_name` ([String](../sql-reference/data-types/string.md)) — the quota name.
- `quota_key` ([String](../sql-reference/data-types/string.md)) — the quota key.
- `is_current` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — the flag that shows whether the quota is used by the current user.
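For example:

``` sql
SELECT quota_name, quota_key, is_current
FROM system.quotas_usage;
```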

View File

@ -1334,4 +1334,79 @@ len: 30
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
## randomFixedString {#randomfixedstring}
Generates a binary string of the specified length, filled with random bytes, including null bytes.
**Syntax**
``` sql
randomFixedString(length);
```
**Parameters**
- `length` — the string length in bytes. [UInt64](../../sql-reference/data-types/int-uint.md).
**Returned value**
- A string filled with random bytes.
Type: [FixedString](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
```sql
SELECT randomFixedString(13) as rnd, toTypeName(rnd)
```
Result:
```text
┌─rnd──────┬─toTypeName(randomFixedString(13))─┐
│ j▒h㋖HɨZ'▒ │ FixedString(13) │
└──────────┴───────────────────────────────────┘
```
## randomStringUTF8 {#randomstringutf8}
Generates a string of the specified length with random UTF-8-encoded characters.
**Syntax**
``` sql
randomStringUTF8(length);
```
**Parameters**
- `length` — the length of the resulting string in code points. [UInt64](../../sql-reference/data-types/int-uint.md).
**Returned value**
- A random UTF-8-encoded string.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
```sql
SELECT randomStringUTF8(13)
```
Result:
```text
┌─randomStringUTF8(13)─┐
│ 𘤗𙉝д兠庇󡅴󱱎󦐪􂕌𔊹𓰛 │
└──────────────────────┘
```
[Original article](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) <!--hide-->

View File

@ -33,4 +33,42 @@ SELECT alphaTokens('abca1abc')
└─────────────────────────┘
```
## extractAllGroups(text, regexp) {#extractallgroups}
Extracts all groups from non-overlapping substrings matched by a regular expression.
**Syntax**
``` sql
extractAllGroups(text, regexp)
```
**Parameters**
- `text` — [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `regexp` — a regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned values**
- If at least one matching group is found, the function returns an `Array(Array(String))` column, grouped by group id (from 1 to N, where N is the number of capturing groups in `regexp`).
- If there are no matching groups, it returns an empty array.
Type: [Array](../data-types/array.md).
**Usage example**
Query:
``` sql
SELECT extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)');
```
Result:
``` text
┌─extractAllGroups('abc=123, 8="hkl"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)')─┐
│ [['abc','123'],['8','"hkl"']] │
└───────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/ru/query_language/functions/splitting_merging_functions/) <!--hide-->

View File

@ -293,7 +293,7 @@ Examples of how this hierarchy is treated:
- The `MODIFY SETTING` privilege allows modifying table engine settings. It doesn't affect settings or server configuration parameters.
- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation-statement) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
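As a sketch, stopping an `ALTER UPDATE` mutation could look like this (the names are hypothetical; the user is assumed to hold the `ALTER UPDATE` privilege on the table):

``` sql
GRANT ALTER UPDATE ON db.table TO john;
-- john can now stop mutations that he is allowed to start:
KILL MUTATION WHERE database = 'db' AND table = 'table';
```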
### CREATE {#grant-create}
@ -312,7 +312,7 @@ Allows executing [CREATE](../../sql-reference/statements/create.md) and [ATTACH]
### DROP {#grant-drop}
Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach) queries according to the following hierarchy of privileges:
Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach-statement) queries according to the following hierarchy of privileges:
- `DROP`. Level:
- `DROP DATABASE`. Level: `DATABASE`

View File

@ -7,7 +7,7 @@ Join creates a new table by combining
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -15,7 +15,7 @@ FROM <left_table>
## Supported Types of JOIN {#select-join-types}
All types from the standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) are supported:
- `INNER JOIN`, only matching rows are returned.
- `LEFT OUTER JOIN`, non-matching rows from the left table are returned in addition to matching rows.
@ -29,18 +29,14 @@ FROM <left_table>
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on the join keys, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on the join keys, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for the opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, for joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
Changes how matching by the join keys is performed:
- `ALL` — the standard `JOIN` behavior in SQL, as described above. The default.
- `ANY` — partially (for the opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — for joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Settings {#join-settings}
!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
### ASOF JOIN Usage {#asof-join-usage}
`ASOF JOIN` is useful when you need to join records that have no exact match.
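A sketch of the idea with hypothetical event tables, matching each left-hand row to the closest earlier timestamp on the right:

``` sql
SELECT a.user_id, a.event_time, b.event_time AS closest_earlier
FROM events_a AS a
ASOF JOIN events_b AS b
ON a.user_id = b.user_id AND a.event_time >= b.event_time;
```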

View File

@ -90,7 +90,7 @@ SELECT name, status FROM system.dictionaries;
## Managing Distributed Tables {#query-language-system-distributed}
ClickHouse can manage [distributed](../../sql-reference/statements/system.md) tables. When a user inserts data into such a table, ClickHouse first builds a queue of the data that should be sent to the cluster nodes and then asynchronously sends the prepared data. You can manage the queue with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) and [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) queries. You can also insert distributed data synchronously using the `insert_distributed_sync` setting.
ClickHouse can manage [distributed](../../sql-reference/statements/system.md) tables. When a user inserts data into such a table, ClickHouse first builds a queue of the data that should be sent to the cluster nodes and then asynchronously sends the prepared data. You can manage the queue with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) and [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) queries. You can also insert distributed data synchronously using the [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync) setting.
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}

View File

@ -4,8 +4,11 @@
# This script deploys ClickHouse website to your personal test subdomain.
#
# Before first use of this script:
# 1) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 2) Send an email to the address from https://clickhouse.tech/#contacts asking to create the GIT_USER-test.clickhouse.tech domain
# 1) Set up building documentation according to https://github.com/ClickHouse/ClickHouse/tree/master/docs/tools#use-buildpy-use-build-py
# 2) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 3) Enable GitHub Pages in the settings of this repo
# 4) Add a file named CNAME in the root of this repo with the content "GIT_USER-test.clickhouse.tech" (without quotes)
# 5) Send an email to the address from https://clickhouse.tech/#contacts asking to create the GIT_USER-test.clickhouse.tech domain
#
set -ex

View File

@ -87,7 +87,10 @@ def build_blog_nav(lang, args):
posts = []
post_meta_items = []
for post in os.listdir(year_dir):
meta, _ = util.read_md_file(os.path.join(year_dir, post))
post_path = os.path.join(year_dir, post)
if not post.endswith('.md'):
raise RuntimeError(f'Unexpected non-md file in posts folder: {post_path}')
meta, _ = util.read_md_file(post_path)
post_date = meta['date']
post_title = meta['title']
if datetime.date.fromisoformat(post_date) > datetime.date.today():

View File

@ -44,7 +44,7 @@ then
if [[ ! -z "${CLOUDFLARE_TOKEN}" ]]
then
sleep 1m
git diff --stat="9999,9999" --diff-filter=M HEAD~1 | grep '|' | awk '$1 ~ /\.html$/ { if ($3>8) { url="https://content.clickhouse.tech/"$1; sub(/\/index.html/, "/", url); print "\""url"\""; }}' | split -l 25 /dev/stdin PURGE
git diff --stat="9999,9999" --diff-filter=M HEAD~1 | grep '|' | awk '$1 ~ /\.html$/ { if ($3>6) { url="https://content.clickhouse.tech/"$1; sub(/index.html/, "", url); print "\""url"\""; }}' | split -l 25 /dev/stdin PURGE
for FILENAME in $(ls PURGE*)
do
POST_DATA=$(cat "${FILENAME}" | sed -n -e 'H;${x;s/\n/,/g;s/^,//;p;}' | awk '{print "{\"files\":["$0"]}";}')

View File

@ -22,7 +22,7 @@ mkdocs-macros-plugin==0.4.9
nltk==3.5
nose==1.3.7
protobuf==3.12.2
numpy==1.18.5
numpy==1.19.1
Pygments==2.5.2
pymdown-extensions==7.1
python-slugify==4.0.1
@ -35,4 +35,4 @@ soupsieve==2.0.1
termcolor==1.1.0
tornado==5.1.1
Unidecode==1.1.1
urllib3==1.25.9
urllib3==1.25.10

View File

@ -9,4 +9,4 @@ python-slugify==4.0.1
PyYAML==5.3.1
requests==2.24.0
text-unidecode==1.3
urllib3==1.25.9
urllib3==1.25.10

View File

@ -124,15 +124,7 @@ def adjust_markdown_html(content):
def minify_html(content):
return htmlmin.minify(content,
remove_comments=False,
remove_empty_space=True,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
convert_charrefs=False,
keep_pre=True)
return htmlmin.minify(content)
def build_website(args):

View File

@ -23,7 +23,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem
**Engine Parameters**
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.

Some files were not shown because too many files have changed in this diff.