Merge branch 'master' into improve_create_or_replace

mergify[bot] 2021-08-03 11:39:07 +00:00 committed by GitHub
commit dc57254982
829 changed files with 10080 additions and 4950 deletions


@@ -19,9 +19,9 @@ Detailed description / Documentation draft:
...
-By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
+> By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
-If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
+> If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
-Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/
+> Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/

.gitmodules vendored

@@ -225,6 +225,15 @@
[submodule "contrib/yaml-cpp"]
    path = contrib/yaml-cpp
    url = https://github.com/ClickHouse-Extras/yaml-cpp.git
+[submodule "contrib/libstemmer_c"]
+    path = contrib/libstemmer_c
+    url = https://github.com/ClickHouse-Extras/libstemmer_c.git
+[submodule "contrib/wordnet-blast"]
+    path = contrib/wordnet-blast
+    url = https://github.com/ClickHouse-Extras/wordnet-blast.git
+[submodule "contrib/lemmagen-c"]
+    path = contrib/lemmagen-c
+    url = https://github.com/ClickHouse-Extras/lemmagen-c.git
[submodule "contrib/libpqxx"]
    path = contrib/libpqxx
    url = https://github.com/ClickHouse-Extras/libpqxx.git


@@ -542,6 +542,7 @@ include (cmake/find/libpqxx.cmake)
include (cmake/find/nuraft.cmake)
include (cmake/find/yaml-cpp.cmake)
include (cmake/find/s2geometry.cmake)
+include (cmake/find/nlp.cmake)
if(NOT USE_INTERNAL_PARQUET_LIBRARY)
    set (ENABLE_ORC OFF CACHE INTERNAL "")


@@ -259,10 +259,25 @@ private:
    Poco::Logger * log;
    BaseDaemon & daemon;
-   void onTerminate(const std::string & message, UInt32 thread_num) const
+   void onTerminate(std::string_view message, UInt32 thread_num) const
    {
+       size_t pos = message.find('\n');
        LOG_FATAL(log, "(version {}{}, {}) (from thread {}) {}",
-           VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message);
+           VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message.substr(0, pos));
+       /// Print trace from std::terminate exception line-by-line to make it easy for grep.
+       while (pos != std::string_view::npos)
+       {
+           ++pos;
+           size_t next_pos = message.find('\n', pos);
+           size_t size = next_pos;
+           if (next_pos != std::string_view::npos)
+               size = next_pos - pos;
+           LOG_FATAL(log, "{}", message.substr(pos, size));
+           pos = next_pos;
+       }
    }
    void onFault(


@@ -4,13 +4,24 @@ QUERIES_FILE="queries.sql"
TABLE=$1
TRIES=3
+if [ -x ./clickhouse ]
+then
+    CLICKHOUSE_CLIENT="./clickhouse client"
+elif command -v clickhouse-client >/dev/null 2>&1
+then
+    CLICKHOUSE_CLIENT="clickhouse-client"
+else
+    echo "clickhouse-client is not found"
+    exit 1
+fi
cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
    sync
    echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
    echo -n "["
    for i in $(seq 1 $TRIES); do
-        RES=$(clickhouse-client --time --format=Null --query="$query" 2>&1)
+        RES=$(${CLICKHOUSE_CLIENT} --time --format=Null --max_memory_usage=100G --query="$query" 2>&1)
        [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
        [[ "$i" != $TRIES ]] && echo -n ", "
    done


@@ -11,8 +11,8 @@ DATASET="${TABLE}_v1.tar.xz"
QUERIES_FILE="queries.sql"
TRIES=3
-AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
-AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
+AMD64_BIN_URL="https://builds.clickhouse.tech/master/amd64/clickhouse"
+AARCH64_BIN_URL="https://builds.clickhouse.tech/master/aarch64/clickhouse"
# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.
@@ -89,7 +89,7 @@ cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
    echo -n "["
    for i in $(seq 1 $TRIES); do
-        RES=$(./clickhouse client --max_memory_usage 100000000000 --time --format=Null --query="$query" 2>&1 ||:)
+        RES=$(./clickhouse client --max_memory_usage 100G --time --format=Null --query="$query" 2>&1 ||:)
        [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
        [[ "$i" != $TRIES ]] && echo -n ", "
    done

cmake/find/nlp.cmake Normal file

@@ -0,0 +1,32 @@
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (NOT ENABLE_NLP)
message (STATUS "NLP functions disabled")
return()
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libstemmer_c/Makefile")
message (WARNING "submodule contrib/libstemmer_c is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libstemmer_c library, NLP functions will be disabled")
set (USE_NLP 0)
return()
endif ()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/wordnet-blast/CMakeLists.txt")
message (WARNING "submodule contrib/wordnet-blast is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal wordnet-blast library, NLP functions will be disabled")
set (USE_NLP 0)
return()
endif ()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/lemmagen-c/README.md")
message (WARNING "submodule contrib/lemmagen-c is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal lemmagen-c library, NLP functions will be disabled")
set (USE_NLP 0)
return()
endif ()
set (USE_NLP 1)
message (STATUS "Using Libraries for NLP functions: contrib/wordnet-blast, contrib/libstemmer_c, contrib/lemmagen-c")
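The submodules wired in above back ClickHouse's experimental NLP functions. As a rough illustration of what they enable, a session might look like the sketch below; the exact function set and argument formats should be checked against the documentation for the release that ships this change, and `lemmatize`/`synonyms` additionally require dictionaries configured on the server.

``` sql
-- Illustrative only: the NLP functions are experimental and gated behind a setting.
SET allow_experimental_nlp_functions = 1;
-- Snowball stemming via contrib/libstemmer_c; the 'en' language code and the
-- expected result 'run' are assumptions for this sketch.
SELECT stem('en', 'running');
```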


@@ -328,6 +328,12 @@ endif()
add_subdirectory(fast_float)
+if (USE_NLP)
+    add_subdirectory(libstemmer-c-cmake)
+    add_subdirectory(wordnet-blast-cmake)
+    add_subdirectory(lemmagen-c-cmake)
+endif()
if (USE_SQLITE)
    add_subdirectory(sqlite-cmake)
endif()

contrib/NuRaft vendored

@@ -1 +1 @@
-Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26
+Subproject commit 0ce9490093021c63564cca159571a8b27772ad48

contrib/boost vendored

@@ -1 +1 @@
-Subproject commit 1ccbb5a522a571ce83b606dbc2e1011c42ecccfb
+Subproject commit 9cf09dbfd55a5c6202dedbdf40781a51b02c2675


@@ -13,11 +13,12 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY)
        regex
        context
        coroutine
+       graph
    )
    if(Boost_INCLUDE_DIR AND Boost_FILESYSTEM_LIBRARY AND Boost_FILESYSTEM_LIBRARY AND
       Boost_PROGRAM_OPTIONS_LIBRARY AND Boost_REGEX_LIBRARY AND Boost_SYSTEM_LIBRARY AND Boost_CONTEXT_LIBRARY AND
-      Boost_COROUTINE_LIBRARY)
+      Boost_COROUTINE_LIBRARY AND Boost_GRAPH_LIBRARY)
        set(EXTERNAL_BOOST_FOUND 1)
@@ -32,6 +33,7 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY)
        add_library (_boost_system INTERFACE)
        add_library (_boost_context INTERFACE)
        add_library (_boost_coroutine INTERFACE)
+       add_library (_boost_graph INTERFACE)
        target_link_libraries (_boost_filesystem INTERFACE ${Boost_FILESYSTEM_LIBRARY})
        target_link_libraries (_boost_iostreams INTERFACE ${Boost_IOSTREAMS_LIBRARY})
@@ -40,6 +42,7 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY)
        target_link_libraries (_boost_system INTERFACE ${Boost_SYSTEM_LIBRARY})
        target_link_libraries (_boost_context INTERFACE ${Boost_CONTEXT_LIBRARY})
        target_link_libraries (_boost_coroutine INTERFACE ${Boost_COROUTINE_LIBRARY})
+       target_link_libraries (_boost_graph INTERFACE ${Boost_GRAPH_LIBRARY})
        add_library (boost::filesystem ALIAS _boost_filesystem)
        add_library (boost::iostreams ALIAS _boost_iostreams)
@@ -48,6 +51,7 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY)
        add_library (boost::system ALIAS _boost_system)
        add_library (boost::context ALIAS _boost_context)
        add_library (boost::coroutine ALIAS _boost_coroutine)
+       add_library (boost::graph ALIAS _boost_graph)
    else()
        set(EXTERNAL_BOOST_FOUND 0)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system boost")
@@ -221,4 +225,17 @@
    add_library (boost::coroutine ALIAS _boost_coroutine)
    target_include_directories (_boost_coroutine PRIVATE ${LIBRARY_DIR})
    target_link_libraries(_boost_coroutine PRIVATE _boost_context)
+   # graph
+   set (SRCS_GRAPH
+       "${LIBRARY_DIR}/libs/graph/src/graphml.cpp"
+       "${LIBRARY_DIR}/libs/graph/src/read_graphviz_new.cpp"
+   )
+   add_library (_boost_graph ${SRCS_GRAPH})
+   add_library (boost::graph ALIAS _boost_graph)
+   target_include_directories (_boost_graph PRIVATE ${LIBRARY_DIR})
+   target_link_libraries(_boost_graph PRIVATE _boost_regex)
endif ()

contrib/lemmagen-c vendored Submodule

@@ -0,0 +1 @@
+Subproject commit 59537bdcf57bbed17913292cb4502d15657231f1


@@ -0,0 +1,9 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/lemmagen-c")
set(LEMMAGEN_INCLUDE_DIR "${LIBRARY_DIR}/include")
set(SRCS
"${LIBRARY_DIR}/src/RdrLemmatizer.cpp"
)
add_library(lemmagen STATIC ${SRCS})
target_include_directories(lemmagen PUBLIC "${LEMMAGEN_INCLUDE_DIR}")


@@ -0,0 +1,31 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libstemmer_c")
set(STEMMER_INCLUDE_DIR "${LIBRARY_DIR}/include")
FILE ( READ "${LIBRARY_DIR}/mkinc.mak" _CONTENT )
# replace '\ ' into one big line
STRING ( REGEX REPLACE "\\\\\n " " ${LIBRARY_DIR}/" _CONTENT "${_CONTENT}" )
# escape ';' (if any)
STRING ( REGEX REPLACE ";" "\\\\;" _CONTENT "${_CONTENT}" )
# now replace lf into ';' (it makes list from the line)
STRING ( REGEX REPLACE "\n" ";" _CONTENT "${_CONTENT}" )
FOREACH ( LINE ${_CONTENT} )
# skip comments (beginning with #)
IF ( NOT "${LINE}" MATCHES "^#.*" )
# parse 'name=value1 value2..." - extract the 'name' part
STRING ( REGEX REPLACE "=.*$" "" _NAME "${LINE}" )
# extract the list of values part
STRING ( REGEX REPLACE "^.*=" "" _LIST "${LINE}" )
# replace (multi)spaces into ';' (it makes list from the line)
STRING ( REGEX REPLACE " +" ";" _LIST "${_LIST}" )
# finally get our two variables
IF ( "${_NAME}" MATCHES "snowball_sources" )
SET ( _SOURCES "${_LIST}" )
ELSEIF ( "${_NAME}" MATCHES "snowball_headers" )
SET ( _HEADERS "${_LIST}" )
ENDIF ()
endif ()
endforeach ()
# all the sources parsed. Now just add the lib
add_library ( stemmer STATIC ${_SOURCES} ${_HEADERS} )
target_include_directories (stemmer PUBLIC "${STEMMER_INCLUDE_DIR}")

contrib/libstemmer_c vendored Submodule

@@ -0,0 +1 @@
+Subproject commit c753054304d87daf460057c1a649c482aa094835


@@ -22,6 +22,7 @@ set(SRCS
    "${LIBRARY_DIR}/src/launcher.cxx"
    "${LIBRARY_DIR}/src/srv_config.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
+    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
    "${LIBRARY_DIR}/src/handle_timeout.cxx"
    "${LIBRARY_DIR}/src/handle_append_entries.cxx"
    "${LIBRARY_DIR}/src/cluster_config.cxx"

contrib/protobuf vendored

@@ -1 +1 @@
-Subproject commit 73b12814204ad9068ba352914d0dc244648b48ee
+Subproject commit 75601841d172c73ae6bf4ce8121f42b875cdbabd

contrib/wordnet-blast vendored Submodule

@@ -0,0 +1 @@
+Subproject commit 1d16ac28036e19fe8da7ba72c16a307fbdf8c87e


@@ -0,0 +1,13 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/wordnet-blast")
set(SRCS
"${LIBRARY_DIR}/wnb/core/info_helper.cc"
"${LIBRARY_DIR}/wnb/core/load_wordnet.cc"
"${LIBRARY_DIR}/wnb/core/wordnet.cc"
)
add_library(wnb ${SRCS})
target_link_libraries(wnb PRIVATE boost::headers_only boost::graph)
target_include_directories(wnb PUBLIC "${LIBRARY_DIR}")


@@ -23,6 +23,7 @@ RUN apt-get update \
        libboost-regex-dev \
        libboost-context-dev \
        libboost-coroutine-dev \
+        libboost-graph-dev \
        zlib1g-dev \
        liblz4-dev \
        libdouble-conversion-dev \


@@ -61,4 +61,7 @@ ENV TSAN_OPTIONS='halt_on_error=1 history_size=7'
ENV UBSAN_OPTIONS='print_stacktrace=1'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
+ENV TZ=Europe/Moscow
+RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
CMD sleep 1


@@ -299,6 +299,7 @@ function run_tests
        01318_decrypt # Depends on OpenSSL
        01663_aes_msan # Depends on OpenSSL
        01667_aes_args_check # Depends on OpenSSL
+        01683_codec_encrypted # Depends on OpenSSL
        01776_decrypt_aead_size_check # Depends on OpenSSL
        01811_filter_by_null # Depends on OpenSSL
        01281_unsucceeded_insert_select_queries_counter
@@ -310,6 +311,7 @@ function run_tests
        01411_bayesian_ab_testing
        01798_uniq_theta_sketch
        01799_long_uniq_theta_sketch
+        01890_stem # depends on libstemmer_c
        collate
        collation
        _orc_


@@ -14,10 +14,14 @@ services:
        }
        EOF
        ./docker-entrypoint.sh'
-    ports:
-      - 9020:9019
+    expose:
+      - 9019
    healthcheck:
      test: ["CMD", "curl", "-s", "localhost:9019/ping"]
      interval: 5s
      timeout: 3s
      retries: 30
+    volumes:
+      - type: ${JDBC_BRIDGE_FS:-tmpfs}
+        source: ${JDBC_BRIDGE_LOGS:-}
+        target: /app/logs


@@ -0,0 +1,13 @@
version: '2.3'
services:
    mongo1:
        image: mongo:3.6
        restart: always
        environment:
            MONGO_INITDB_ROOT_USERNAME: root
            MONGO_INITDB_ROOT_PASSWORD: clickhouse
        volumes:
            - ${MONGO_CONFIG_PATH}:/mongo/
        ports:
            - ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
        command: --config /mongo/mongo_secure.conf --profile=2 --verbose


@@ -2,7 +2,7 @@ version: '2.3'
services:
    rabbitmq1:
-        image: rabbitmq:3-management-alpine
+        image: rabbitmq:3.8-management-alpine
        hostname: rabbitmq1
        expose:
            - ${RABBITMQ_PORT}


@@ -2,6 +2,11 @@
set -e -x
+# Choose random timezone for this test run
+TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
+echo "Choosen random timezone $TZ"
+ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
dpkg -i package_folder/clickhouse-common-static_*.deb;
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb


@@ -3,6 +3,11 @@
# fail on errors, verbose and export all env variables
set -e -x -a
+# Choose random timezone for this test run.
+TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
+echo "Choosen random timezone $TZ"
+ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
@@ -138,15 +143,18 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
fi
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/zookeeper_log_dump.tar /var/lib/clickhouse/data/system/zookeeper_log ||:
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
    grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
    pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
    pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+    tar -chf /test_output/zookeeper_log_dump1.tar /var/lib/clickhouse1/data/system/zookeeper_log ||:
+    tar -chf /test_output/zookeeper_log_dump2.tar /var/lib/clickhouse2/data/system/zookeeper_log ||:
    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
    tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
fi


@@ -77,9 +77,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && rm -rf /tmp/clickhouse-odbc-tmp
-ENV TZ=Europe/Moscow
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]


@@ -20,6 +20,7 @@ def get_skip_list_cmd(path):
def get_options(i):
    options = []
+    client_options = []
    if 0 < i:
        options.append("--order=random")
@@ -27,25 +28,29 @@ def get_options(i):
        options.append("--db-engine=Ordinary")
    if i % 3 == 2:
-        options.append('''--client-option='allow_experimental_database_replicated=1' --db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        client_options.append('allow_experimental_database_replicated=1')
    # If database name is not specified, new database is created for each functional test.
    # Run some threads with one database for all tests.
    if i % 2 == 1:
        options.append(" --database=test_{}".format(i))
-    if i % 7 == 0:
-        options.append(" --client-option='join_use_nulls=1'")
-    if i % 14 == 0:
-        options.append(' --client-option="join_algorithm=\'partial_merge\'"')
-    if i % 21 == 0:
-        options.append(' --client-option="join_algorithm=\'auto\'"')
-        options.append(' --client-option="max_rows_in_join=1000"')
+    if i % 5 == 1:
+        client_options.append("join_use_nulls=1")
+    if i % 15 == 6:
+        client_options.append("join_algorithm='partial_merge'")
+    if i % 15 == 11:
+        client_options.append("join_algorithm='auto'")
+        client_options.append('max_rows_in_join=1000')
    if i == 13:
-        options.append(" --client-option='memory_tracker_fault_probability=0.00001'")
+        client_options.append('memory_tracker_fault_probability=0.001')
+    if client_options:
+        options.append(" --client-option " + ' '.join(client_options))
    return ' '.join(options)
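For reference, everything passed through `--client-option` above corresponds to ordinary ClickHouse settings, so one randomized stress configuration is equivalent to setting them in a session. A sketch combining the `i % 5 == 1` and `i % 15 == 11` branches (illustrative only):

``` sql
-- Same settings the stress runner passes to clickhouse-client via --client-option.
SET join_use_nulls = 1;
SET join_algorithm = 'auto';
SET max_rows_in_join = 1000;
```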


@@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-RUN pip3 install urllib3 testflows==1.6.90 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy
+RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 20.10.6


@@ -1,8 +1,6 @@
# docker build -t yandex/clickhouse-unit-test .
FROM yandex/clickhouse-stateless-test
-ENV TZ=Europe/Moscow
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt-get install gdb
COPY run.sh /


@@ -9,7 +9,7 @@ Many developers can say that the code is the best docs by itself, and they are r
If you want to help ClickHouse with documentation you can face, for example, the following questions:
- "I don't know how to write."
    We have prepared some [recommendations](#what-to-write) for you.
- "I know what I want to write, but I don't know how to contribute to docs."
@@ -71,17 +71,17 @@ Contribute all new information in English language. Other languages are translat
    ```
- Bold text: `**asterisks**` or `__underlines__`.
- Links: `[link text](uri)`. Examples:
    - External link: `[ClickHouse repo](https://github.com/ClickHouse/ClickHouse)`
    - Cross link: `[How to build docs](tools/README.md)`
- Images: `![Exclamation sign](uri)`. You can refer to local images as well as remote in internet.
- Lists: Lists can be of two types:
    - `- unordered`: Each item starts from the `-`.
    - `1. ordered`: Each item starts from the number.
    A list must be separated from the text by an empty line. Nested lists must be indented with 4 spaces.
- Inline code: `` `in backticks` ``.
@@ -107,7 +107,7 @@ Contribute all new information in English language. Other languages are translat
- Text hidden behind a cut (single sting that opens on click):
    ```text
    <details markdown="1"> <summary>Visible text</summary>
        Hidden content.
    </details>`.
    ```


@@ -1,6 +1,6 @@
---
toc_priority:
toc_title:
---
# data_type_name {#data_type-name}


@@ -58,6 +58,6 @@ Result:
Follow up with any text to clarify the example.
**See Also**
- [link](#)


@@ -14,8 +14,8 @@ More text (Optional).
**Arguments** (Optional)
- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>.Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Parameters** (Optional, only for parametric aggregate functions)
@@ -23,7 +23,7 @@ More text (Optional).
**Returned value(s)**
- Returned values list.
Type: [Type name](relative/path/to/type/dscr.md#type).


@@ -16,8 +16,8 @@ Better:
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
```
If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or option has some
pre-conditions, leave a comment above the `option()` line and explain what it does.
The best way would be linking the docs page (if it exists).
The comment is parsed into a separate column (see below).
@@ -33,7 +33,7 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests"
Suppose you have an option that may strip debug symbols from the ClickHouse's part.
This can speed up the linking process, but produces a binary that cannot be debugged.
In that case, prefer explicitly raising a warning telling the developer that he may be doing something wrong.
Also, such options should be disabled if applies.
Bad:


@@ -7,7 +7,7 @@ toc_title: Support
!!! info "Info"
    If you have launched a ClickHouse commercial support service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) adding it to the following list.
## Yandex.Cloud
ClickHouse worldwide support from the authors of ClickHouse. Supports on-premise and cloud deployments. Ask details on clickhouse-support@yandex-team.com


@@ -4,11 +4,11 @@ ClickHouse has hundreds (or even thousands) of features. Every commit gets check
The core functionality is very well tested, but some corner-cases and different combinations of features can be uncovered with ClickHouse CI.
Most of the bugs/regressions we see happen in that 'grey area' where test coverage is poor.
And we are very interested in covering most of the possible scenarios and feature combinations used in real life by tests.
## Why adding tests
Why/when you should add a test case into ClickHouse code:
1) you use some complicated scenarios / feature combinations / you have some corner case which is probably not widely used
@@ -17,18 +17,18 @@ Why/when you should add a test case into ClickHouse code:
4) once the test is added/accepted, you can be sure the corner case you check will never be accidentally broken.
5) you will be a part of great open-source community
6) your name will be visible in the `system.contributors` table!
7) you will make a world bit better :)
### Steps to do
#### Prerequisite
I assume you run some Linux machine (you can use docker / virtual machines on other OS) and any modern browser / internet connection, and you have some basic Linux & SQL skills.
Any highly specialized knowledge is not needed (so you don't need to know C++ or know something about how ClickHouse CI works).
#### Preparation
1) [create GitHub account](https://github.com/join) (if you haven't one yet)
2) [setup git](https://docs.github.com/en/free-pro-team@latest/github/getting-started-with-github/set-up-git)
@@ -54,17 +54,17 @@ git remote add upstream https://github.com/ClickHouse/ClickHouse
#### New branch for the test
1) create a new branch from the latest clickhouse master
```
cd ~/workspace/ClickHouse
git fetch upstream
git checkout -b name_for_a_branch_with_my_test upstream/master
```
#### Install & run clickhouse
1) install `clickhouse-server` (follow [official docs](https://clickhouse.tech/docs/en/getting-started/install/))
2) install test configurations (it will use Zookeeper mock implementation and adjust some settings)
```
cd ~/workspace/ClickHouse/tests/config
sudo ./install.sh
@@ -74,7 +74,7 @@ sudo ./install.sh
sudo systemctl restart clickhouse-server
```
#### Creating the test file
1) find the number for your test - find the file with the biggest number in `tests/queries/0_stateless/`
@@ -86,7 +86,7 @@ tests/queries/0_stateless/01520_client_print_query_id.reference
```
Currently, the last number for the test is `01520`, so my test will have the number `01521`
2) create an SQL file with the next number and name of the feature you test
```sh
touch tests/queries/0_stateless/01521_dummy_test.sql
@@ -112,16 +112,16 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
- fast - should not take longer than a few seconds (better subseconds)
- correct - fails then feature is not working
- deterministic
- isolated / stateless
    - don't rely on some environment things
    - don't rely on timing when possible
- try to cover corner cases (zeros / Nulls / empty sets / throwing exceptions)
- to test that query return errors, you can put special comment after the query: `-- { serverError 60 }` or `-- { clientError 20 }`
- don't switch databases (unless necessary)
- you can create several table replicas on the same node if needed
- you can use one of the test cluster definitions when needed (see system.clusters)
- use `number` / `numbers_mt` / `zeros` / `zeros_mt` and similar for queries / to initialize data when applicable
- clean up the created objects after test and before the test (DROP IF EXISTS) - in case of some dirty state
- prefer sync mode of operations (mutations, merges, etc.)
- use other SQL files in the `0_stateless` folder as an example
- ensure the feature / feature combination you want to test is not yet covered with existing tests
@@ -138,7 +138,7 @@ It's important to name tests correctly, so one could turn some tests subset off
#### Commit / push / create PR.
1) commit & push your changes
```sh
cd ~/workspace/ClickHouse
git add tests/queries/0_stateless/01521_dummy_test.sql
@@ -147,5 +147,5 @@ git commit # use some nice commit message when possible
git push origin HEAD
```
2) use a link which was shown during the push, to create a PR into the main repo
3) adjust the PR title and contents, in `Changelog category (leave one)` keep
`Build/Testing/Packaging Improvement`, fill the rest of the fields if you want.
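To make the checklist above concrete, a minimal stateless test might look like the sketch below. It is illustrative only: the number `01521` follows the guide's own example, `60` is assumed to be the `UNKNOWN_TABLE` server error code, and the expected output would go into the matching `.reference` file.

``` sql
-- tests/queries/0_stateless/01521_dummy_test.sql (sketch)
DROP TABLE IF EXISTS test_01521;
CREATE TABLE test_01521 (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO test_01521 SELECT number FROM numbers(3);
SELECT * FROM test_01521 ORDER BY x;
SELECT * FROM table_that_surely_does_not_exist_01521; -- { serverError 60 }
DROP TABLE test_01521;
```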


@@ -8,7 +8,7 @@ toc_title: Third-Party Libraries Used
The list of third-party libraries can be obtained by the following query:
``` sql
-SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en'
+SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en';
```
[Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)


@@ -749,7 +749,7 @@ If your code in the `master` branch is not buildable yet, exclude it from the bu
**1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks.
-**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in form of source code in `contrib` directory and built with ClickHouse.
+**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in form of source code in `contrib` directory and built with ClickHouse. See [Guidelines for adding new third-party libraries](contrib.md#adding-third-party-libraries) for details.
**3.** Preference is always given to libraries that are already in use.


@@ -70,7 +70,13 @@ Note that integration of ClickHouse with third-party drivers is not tested. Also
Unit tests are useful when you want to test not the ClickHouse as a whole, but a single isolated library or class. You can enable or disable build of tests with `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return non-zero exit code on test failure.
-Its not necessarily to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
+Its not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
+You can run individual gtest checks by calling the executable directly, for example:
+```bash
+$ ./src/unit_tests_dbms --gtest_filter=LocalAddress*
+```
## Performance Tests {#performance-tests}


@@ -17,7 +17,7 @@ It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#re
### Table UUID {#table-uuid}
All tables in database `Atomic` have persistent [UUID](../../sql-reference/data-types/uuid.md) and store data in directory `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, where `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` is UUID of the table.
Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended). To display the `SHOW CREATE` query with the UUID you can use setting [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil). For example:
```sql


@@ -14,7 +14,7 @@ You can also use the following database engines:
- [MySQL](../../engines/database-engines/mysql.md)
-- [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md)
+- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)


@@ -1,9 +1,9 @@
---
toc_priority: 29
-toc_title: MaterializeMySQL
+toc_title: MaterializedMySQL
---
-# MaterializeMySQL {#materialize-mysql}
+# MaterializedMySQL {#materialized-mysql}
**This is experimental feature that should not be used in production.**
@@ -17,7 +17,7 @@ This feature is experimental.
``` sql
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
-ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
+ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
```
**Engine Parameters**
@@ -36,19 +36,26 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor
- `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). Negative value disable retry. Default: `1000`.
- `allows_query_when_mysql_lost` — Allow query materialized table when mysql is lost. Default: `0` (`false`).
```
-CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***')
+CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***')
SETTINGS
    allows_query_when_mysql_lost=true,
    max_wait_time_when_mysql_unavailable=10000;
```
+**Settings on MySQL-server side**
+For the correct work of `MaterializeMySQL`, there are few mandatory `MySQL`-side configuration settings that should be set:
+- `default_authentication_plugin = mysql_native_password` since `MaterializeMySQL` can only authorize with this method.
+- `gtid_mode = on` since GTID based logging is a mandatory for providing correct `MaterializeMySQL` replication. Pay attention that while turning this mode `On` you should also specify `enforce_gtid_consistency = on`.
## Virtual columns {#virtual-columns}
-When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
+When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
- `_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
- `_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
    - `1` — Row is not deleted,
    - `-1` — Row is deleted.
## Data Types Support {#data_types-support}
@@ -70,6 +77,7 @@ When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](.
| STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |
+| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) |
Other types are not supported. If MySQL table contains a column of such type, ClickHouse throws exception "Unhandled data type" and stops replication.
@@ -77,23 +85,31 @@ Other types are not supported. If MySQL table contains a column of such type, Cl
## Specifics and Recommendations {#specifics-and-recommendations}
+### Compatibility restrictions
+Apart of the data types limitations there are few restrictions comparing to `MySQL` databases, that should be resolved before replication will be possible:
+- Each table in `MySQL` should contain `PRIMARY KEY`.
+- Replication for tables, those are containing rows with `ENUM` field values out of range (specified in `ENUM` signature) will not work.
### DDL Queries {#ddl-queries}
MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.
### Data Replication {#data-replication}
-`MaterializeMySQL` does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:
+`MaterializedMySQL` does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:
- MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.
- MySQL `DELETE` query is converted into `INSERT` with `_sign=-1`.
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.
-### Selecting from MaterializeMySQL Tables {#select}
+### Selecting from MaterializedMySQL Tables {#select}
-`SELECT` query from `MaterializeMySQL` tables has some specifics:
+`SELECT` query from `MaterializedMySQL` tables has some specifics:
- If `_version` is not specified in the `SELECT` query, [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used. So only rows with `MAX(_version)` are selected.
@@ -110,10 +126,10 @@ ClickHouse has only one physical order, which is determined by `ORDER BY` clause
**Notes**
- Rows with `_sign=-1` are not deleted physically from the tables.
-- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
+- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializedMySQL` engine.
- Replication can be easily broken.
- Manual operations on database and tables are forbidden.
-- `MaterializeMySQL` is influenced by [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged in the corresponding table in the `MaterializeMySQL` database when a table in the MySQL server changes.
+- `MaterializedMySQL` is influenced by [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL server changes.
## Examples of Use {#examples-of-use}
@@ -130,9 +146,9 @@ mysql> SELECT * FROM test;
```
```text
+---+------+------+
| a | b    | c    |
+---+------+------+
| 2 | 222  | Wow! |
+---+------+------+
```
@@ -142,7 +158,7 @@ Database in ClickHouse, exchanging data with the MySQL server:
The database and the table created:
``` sql
-CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***');
+CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***');
SHOW TABLES FROM mysql;
```
@@ -159,9 +175,9 @@ SELECT * FROM mysql.test;
```
``` text
┌─a─┬──b─┐
│ 1 │ 11 │
│ 2 │ 22 │
└───┴────┘
```
@@ -172,9 +188,9 @@ SELECT * FROM mysql.test;
```
``` text
┌─a─┬───b─┬─c────┐
│ 2 │ 222 │ Wow! │
└───┴─────┴──────┘
```
-[Original article](https://clickhouse.tech/docs/en/engines/database-engines/materialize-mysql/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/engines/database-engines/materialized-mysql/) <!--hide-->
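As a supplement to the MySQL-side requirements and the virtual columns described in this file, the sketch below is one way to verify a setup; `SHOW VARIABLES` is standard MySQL, and the `mysql.test` table with columns `a` and `b` is the one from the examples on this page.

``` sql
-- On the MySQL side: verify the mandatory settings listed above.
SHOW VARIABLES LIKE 'default_authentication_plugin';
SHOW VARIABLES LIKE 'gtid_mode';
-- On the ClickHouse side: the virtual columns can be selected explicitly.
SELECT a, b, _sign, _version FROM mysql.test ORDER BY _version;
```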


@@ -53,7 +53,7 @@ All other MySQL data types are converted into [String](../../sql-reference/data-
## Global Variables Support {#global-variables-support}
For better compatibility you may address global variables in MySQL style, as `@@identifier`.
These variables are supported:
- `version`


@@ -14,7 +14,7 @@ Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE test_database
ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cache`]);
```
@@ -43,14 +43,14 @@ ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cac
| TEXT, CHAR | [String](../../sql-reference/data-types/string.md) |
| INTEGER | Nullable([Int32](../../sql-reference/data-types/int-uint.md))|
| ARRAY | [Array](../../sql-reference/data-types/array.md) |
## Examples of Use {#examples-of-use}
Database in ClickHouse, exchanging data with the PostgreSQL server:
``` sql
CREATE DATABASE test_database
ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 1);
```
@@ -102,7 +102,7 @@ SELECT * FROM test_database.test_table;
└────────┴───────┘
```
Consider the table structure was modified in PostgreSQL:
``` sql
postgre> ALTER TABLE test_table ADD COLUMN data Text


@ -1,6 +1,6 @@
# [experimental] Replicated {#replicated}
The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via DDL log being written to ZooKeeper and executed on all of the replicas for a given database.
One ClickHouse server can have multiple replicated databases running and updating at the same time. But there can't be multiple replicas of the same replicated database.
@ -20,9 +20,9 @@ One ClickHouse server can have multiple replicated databases running and updatin
## Specifics and Recommendations {#specifics-and-recommendations}
DDL queries with `Replicated` database work in a similar way to [ON CLUSTER](../../sql-reference/distributed-ddl.md) queries, but with minor differences.
First, the DDL request tries to execute on the initiator (the host that originally received the request from the user). If the request is not fulfilled, the user immediately receives an error, and other hosts do not try to fulfill it. If the request has been successfully completed on the initiator, then all other hosts will automatically retry until they complete it. The initiator will try to wait for the query to be completed on other hosts (no longer than [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) and will return a table with the query execution statuses on each host.
The behavior in case of errors is regulated by the [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) setting; for a `Replicated` database it is better to set it to `null_status_on_timeout` — i.e. if some hosts did not have time to execute the request within [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), do not throw an exception, but show the `NULL` status for them in the table.
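As an illustration, the recommended mode can be enabled per session before issuing DDL on a `Replicated` database (a minimal sketch; `r.rmt2` is a hypothetical table in the `r` database used elsewhere in this article):
```sql
-- relax error handling for Replicated-database DDL in the current session
SET distributed_ddl_output_mode = 'null_status_on_timeout';
CREATE TABLE r.rmt2 (n UInt64) ENGINE = ReplicatedMergeTree ORDER BY n;
```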
@ -47,8 +47,8 @@ CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
```
``` text
┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
│ shard1|replica1 │ 0 │ │ 2 │ 0 │
│ shard1|other_replica │ 0 │ │ 1 │ 0 │
│ other_shard|r1 │ 0 │ │ 0 │ 0 │
└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘
@ -57,13 +57,13 @@ CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
Showing the system table:
``` sql
SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
FROM system.clusters WHERE cluster='r';
```
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
@ -78,9 +78,9 @@ node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY
```
``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9] │
│ node2 │ [0,2,4,6,8] │
└───────┴───────────────┘
```
@ -93,8 +93,8 @@ node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
The cluster configuration will look like this:
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 1 │ 2 │ node4 │ 127.0.0.1 │ 9003 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
@ -108,8 +108,8 @@ node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY
```
```text
┌─hosts─┬─groupArray(n)─┐
│ node2 │ [1,3,5,7,9] │
│ node4 │ [0,2,4,6,8] │
└───────┴───────────────┘
```


@ -35,7 +35,7 @@ The table structure can differ from the original table structure:
- `password` — User password.
## Implementation Details {#implementation-details}
Supports multiple replicas that must be listed by `|` and shards must be listed by `,`. For example:
```sql


@ -20,7 +20,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
Required parameters:
- `primary_key_name` any column name in the column list.
- `primary key` must be specified; it supports only one column in the primary key. The primary key will be serialized in binary as a `rocksdb key`.
- columns other than the primary key will be serialized in binary as `rocksdb` value in corresponding order.
- queries with key `equals` or `in` filtering will be optimized to multi keys lookup from `rocksdb`.
@ -39,4 +39,46 @@ ENGINE = EmbeddedRocksDB
PRIMARY KEY key
```
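For instance, a point lookup uses the multi-key path described above (a minimal sketch; it assumes the example table is named `test` and its primary key column `key` is a `String`, and the key values are hypothetical):
```sql
SELECT * FROM test WHERE key IN ('some-key-1', 'some-key-2');
```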
## Metrics
There is also a `system.rocksdb` table that exposes rocksdb statistics:
```sql
SELECT
name,
value
FROM system.rocksdb
┌─name──────────────────────┬─value─┐
│ no.file.opens │ 1 │
│ number.block.decompressed │ 1 │
└───────────────────────────┴───────┘
```
## Configuration
You can also change any [rocksdb options](https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map) using config:
```xml
<rocksdb>
<options>
<max_background_jobs>8</max_background_jobs>
</options>
<column_family_options>
<num_levels>2</num_levels>
</column_family_options>
<tables>
<table>
<name>TABLE</name>
<options>
<max_background_jobs>8</max_background_jobs>
</options>
<column_family_options>
<num_levels>2</num_levels>
</column_family_options>
</table>
</tables>
</rocksdb>
```
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/embedded-rocksdb/) <!--hide-->


@ -15,7 +15,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
name1 [type1],
name2 [type2],
...
) ENGINE = MongoDB(host:port, database, collection, user, password [, options]);
```
**Engine Parameters**
@ -30,18 +30,30 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
- `password` — User password.
- `options` — MongoDB connection string options (optional parameter).
## Usage Example {#usage-example}
Create a table in ClickHouse that allows reading data from a MongoDB collection:
``` text
CREATE TABLE mongo_table
(
key UInt64,
data String
) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'testuser', 'clickhouse');
```
To read from an SSL secured MongoDB server:
``` text
CREATE TABLE mongo_table_ssl
(
key UInt64,
data String
) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'ssl=true');
```
Query:
``` sql


@ -49,14 +49,14 @@ PostgreSQL `Array` types are converted into ClickHouse arrays.
!!! info "Note"
Be careful - in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different table rows of the same column. But in ClickHouse it is only allowed to have multidimensional arrays with the same number of dimensions in all table rows of the same column.
Supports multiple replicas that must be listed by `|`. For example:
```sql
CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
```
Replicas priority for PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`.
In the example below replica `example01-1` has the highest priority:


@ -14,6 +14,8 @@ Engines of the family:
- [Log](../../../engines/table-engines/log-family/log.md)
- [TinyLog](../../../engines/table-engines/log-family/tinylog.md)
`Log` family table engines can store data to [HDFS](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-hdfs) or [S3](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3) distributed file systems.
## Common Properties {#common-properties}
Engines:


@ -5,10 +5,8 @@ toc_title: Log
# Log {#log}
The engine belongs to the family of `Log` engines. See the common properties of `Log` engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article.
`Log` differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of "marks" resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
The `Log` engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The `Log` engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log/) <!--hide-->


@ -728,7 +728,7 @@ During this time, they are not moved to other volumes or disks. Therefore, until
## Using S3 for Data Storage {#table_engine-mergetree-s3}
`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
This feature is under development and not ready for production. There are known drawbacks such as very low performance.
@ -764,11 +764,13 @@ Configuration markup:
```
Required parameters:
- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint URL should contain a bucket and root path to store data.
- `access_key_id` — S3 access key id.
- `secret_access_key` — S3 secret access key.
Optional parameters:
- `region` — S3 region name.
- `use_environment_credentials` — Reads AWS credentials from the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
@ -784,7 +786,6 @@ Optional parameters:
- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
S3 disk can be configured as `main` or `cold` storage:
``` xml
<storage_configuration>
@ -823,4 +824,43 @@ S3 disk can be configured as `main` or `cold` storage:
With the `cold` option, data can be moved to S3 if the local disk's free space is smaller than `move_factor * disk_size`, or by a TTL move rule.
## Using HDFS for Data Storage {#table_engine-mergetree-hdfs}
[HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html) is a distributed file system for remote data storage.
`MergeTree` family table engines can store data to HDFS using a disk with type `HDFS`.
Configuration markup:
``` xml
<yandex>
<storage_configuration>
<disks>
<hdfs>
<type>hdfs</type>
<endpoint>hdfs://hdfs1:9000/clickhouse/</endpoint>
</hdfs>
</disks>
<policies>
<hdfs>
<volumes>
<main>
<disk>hdfs</disk>
</main>
</volumes>
</hdfs>
</policies>
</storage_configuration>
<merge_tree>
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree>
</yandex>
```
Required parameters:
- `endpoint` — HDFS endpoint URL in `path` format. Endpoint URL should contain a root path to store data.
Optional parameters:
- `min_bytes_for_seek` — The minimal number of bytes to use seek operation instead of sequential read. Default value: `1 Mb`.


@ -101,7 +101,7 @@ For very large clusters, you can use different ZooKeeper clusters for different
Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by the [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.
`ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting which can be tuned with a server restart.
By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
@ -155,7 +155,7 @@ CREATE TABLE table_name
</details>
As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.
Example:
@ -198,7 +198,7 @@ In this case, you can omit arguments when creating tables:
``` sql
CREATE TABLE table_name (
x UInt32
) ENGINE = ReplicatedMergeTree
ORDER BY x;
```
@ -207,7 +207,7 @@ It is equivalent to:
``` sql
CREATE TABLE table_name (
x UInt32
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/table_name', '{replica}')
ORDER BY x;
```


@ -16,7 +16,7 @@ $ curl 'http://localhost:8123/'
Ok.
```
Web UI can be accessed here: `http://localhost:8123/play`.
![Web UI](../images/play.png)


@ -43,7 +43,7 @@ toc_title: Integrations
- Monitoring
- [Graphite](https://graphiteapp.org)
- [graphouse](https://github.com/yandex/graphouse)
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
- [Grafana](https://grafana.com/)


@ -115,6 +115,7 @@ toc_title: Adopters
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | | <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | | <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) | | <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
| <a href="https://splitbee.io" class="favicon">Splitbee</a> | Analytics | Main Product | — | — | [Blog Post, Mai 2021](https://splitbee.io/blog/new-pricing) |
| <a href="https://www.splunk.com/" class="favicon">Splunk</a> | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | | <a href="https://www.splunk.com/" class="favicon">Splunk</a> | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
| <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
| <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) | | <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) |


@ -5,50 +5,67 @@ toc_title: Testing Hardware
# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse}
You can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
## Automated Run
You can run the benchmark with a single script.
1. Download the script.
```
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/hardware.sh
```
2. Run the script.
```
chmod a+x ./hardware.sh
./hardware.sh
```
3. Copy the output and send it to clickhouse-feedback@yandex-team.com
All the results are published here: https://clickhouse.tech/benchmark/hardware/
## Manual Run
Alternatively, you can perform the benchmark with the following steps.
1. ssh to the server and download the binary with wget:
```bash
# For amd64:
wget https://builds.clickhouse.tech/master/amd64/clickhouse
# For aarch64:
wget https://builds.clickhouse.tech/master/aarch64/clickhouse
# Then do:
chmod a+x clickhouse
```
2. Download benchmark files:
```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
```
3. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
```bash
wget https://datasets.clickhouse.tech/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .
```
4. Run the server:
```bash
./clickhouse server
```
5. Check the data: ssh to the server in another terminal
```bash
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000
```
6. Run the benchmark:
```bash
./benchmark-new.sh hits_100m_obfuscated
```
7. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
All the results are published here: https://clickhouse.tech/benchmark/hardware/


@ -34,7 +34,7 @@ Configuration template:
<min_part_size>...</min_part_size>
<min_part_size_ratio>...</min_part_size_ratio>
<method>...</method>
<level>...</level>
</case>
...
</compression>
@ -64,11 +64,33 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
<min_part_size>10000000000</min_part_size>
<min_part_size_ratio>0.01</min_part_size_ratio>
<method>zstd</method>
<level>1</level>
</case>
</compression>
```
## encryption {#server-settings-encryption}
Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The command, or a shell script, is expected to write a Base64-encoded key of any length to stdout.
**Example**
For Linux with systemd:
```xml
<encryption>
<key_command>/usr/bin/systemd-ask-password --id="clickhouse-server" --timeout=0 "Enter the ClickHouse encryption passphrase:" | base64</key_command>
</encryption>
```
For other systems:
```xml
<encryption>
<key_command><![CDATA[IFS=; echo -n >/dev/tty "Enter the ClickHouse encryption passphrase: "; stty=`stty -F /dev/tty -g`; stty -F /dev/tty -echo; read k </dev/tty; stty -F /dev/tty "$stty"; echo -n $k | base64]]></key_command>
</encryption>
```
## custom_settings_prefixes {#custom_settings_prefixes}
List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.
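For example, the server configuration fragment below declares a single prefix (a sketch; `custom_` is just an illustrative prefix name):
```xml
<custom_settings_prefixes>custom_</custom_settings_prefixes>
```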
@ -101,7 +123,7 @@ Default value: `1073741824` (1 GB).
```xml
<core_dump>
<size_limit>1073741824</size_limit>
</core_dump>
```
## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}
@ -442,8 +464,8 @@ The server will need access to the public Internet via IPv4 (at the time of writ
Keys (see the example after this list):
- `enabled` Boolean flag to enable the feature, `false` by default. Set to `true` to allow sending crash reports.
- `endpoint` You can override the Sentry endpoint URL for sending crash reports. It can be either a separate Sentry account or your self-hosted Sentry instance. Use the [Sentry DSN](https://docs.sentry.io/error-reporting/quickstart/?platform=native#configure-the-sdk) syntax.
- `anonymize` - Avoid attaching the server hostname to the crash report.
- `http_proxy` - Configure HTTP proxy for sending crash reports.
- `debug` - Sets the Sentry client into debug mode.
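Put together, a minimal opt-in configuration might look like this (a sketch; it assumes the enclosing element is named `send_crash_reports`, matching this section, and uses only the keys listed above):
```xml
<send_crash_reports>
    <enabled>true</enabled>
    <anonymize>true</anonymize>
</send_crash_reports>
```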
@ -505,7 +527,7 @@ The default `max_server_memory_usage` value is calculated as `memory_amount * ma
## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}
Defines the fraction of the total physical RAM amount available to the ClickHouse server. If the server tries to utilize more, the memory is cut down to the appropriate amount.
Possible values:
@ -883,7 +905,7 @@ Parameters:
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
**Example**
```xml
<yandex>
<text_log>
<level>notice</level>


@ -31,7 +31,7 @@ Settings that can only be made in the server config file are not covered in this
## Custom Settings {#custom_settings}
In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.
A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.
@ -48,7 +48,7 @@ SET custom_a = 123;
To get the current value of a custom setting, use the `getSetting()` function:
```sql
SELECT getSetting('custom_a');
```
**See Also**


@ -280,14 +280,13 @@ Default value: `0`.
## check_sample_column_is_correct {#check_sample_column_is_correct}
Enables a check at table creation that the data type of the column used for sampling or the sampling expression is correct. The data type must be one of the unsigned [integer types](../../sql-reference/data-types/int-uint.md): `UInt8`, `UInt16`, `UInt32`, `UInt64`.
Possible values:
- true — The check is enabled.
- false — The check is disabled at table creation.
Default value: `true`.
By default, the ClickHouse server checks the data type of the column used for sampling or the sampling expression at table creation. If you already have tables with an incorrect sampling expression and do not want the server to raise an exception during startup, set `check_sample_column_is_correct` to `false`.
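A sketch of how this could be switched off globally for MergeTree tables, assuming the standard `<merge_tree>` overrides section of the server configuration:
```xml
<merge_tree>
    <check_sample_column_is_correct>false</check_sample_column_is_correct>
</merge_tree>
```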
[Original article](https://clickhouse.tech/docs/en/operations/settings/merge_tree_settings/) <!--hide-->


@ -65,20 +65,20 @@ What to do when the volume of data read exceeds one of the limits: throw o
The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.
A maximum number of rows that can be read from a local table on a leaf node when running a distributed query. While distributed queries can issue multiple sub-queries to each shard (leaf), this limit will be checked only at the read stage on the leaf nodes and ignored at the results merging stage on the root node. For example, a cluster consists of 2 shards and each shard contains a table with 100 rows. A distributed query that is supposed to read all the data from both tables with the setting `max_rows_to_read=150` will fail, as in total there will be 200 rows, while a query with `max_rows_to_read_leaf=150` will succeed since the leaf nodes will read at most 100 rows each.
## max_bytes_to_read_leaf {#max-bytes-to-read-leaf}
A maximum number of bytes (uncompressed data) that can be read from a local table on a leaf node when running a distributed query. While distributed queries can issue multiple sub-queries to each shard (leaf), this limit will be checked only at the read stage on the leaf nodes and ignored at the results merging stage on the root node. For example, a cluster consists of 2 shards and each shard contains a table with 100 bytes of data. A distributed query that is supposed to read all the data from both tables with the setting `max_bytes_to_read=150` will fail, as in total there will be 200 bytes, while a query with `max_bytes_to_read_leaf=150` will succeed since the leaf nodes will read at most 100 bytes each.
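A sketch of applying the leaf limits from the two sections above at the session level (the numeric values mirror the examples in the text):
```sql
SET max_rows_to_read_leaf = 150;
SET max_bytes_to_read_leaf = 150;
```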
## read_overflow_mode_leaf {#read-overflow-mode-leaf}


@ -28,7 +28,7 @@ Structure of the `users` section:
<profile>profile_name</profile>
<quota>default</quota>
<default_database>default</default_database>
<databases>
<database_name>
<table_name>


@ -20,6 +20,29 @@ Possible values:
- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN`.
- `allow` — Allows the use of these types of subqueries.
## prefer_global_in_and_join {#prefer-global-in-and-join}
Enables the replacement of `IN`/`JOIN` operators with `GLOBAL IN`/`GLOBAL JOIN`.
Possible values:
- 0 — Disabled. `IN`/`JOIN` operators are not replaced with `GLOBAL IN`/`GLOBAL JOIN`.
- 1 — Enabled. `IN`/`JOIN` operators are replaced with `GLOBAL IN`/`GLOBAL JOIN`.
Default value: `0`.
**Usage**
Although `SET distributed_product_mode=global` can change the query behavior for distributed tables, it is not suitable for local tables or tables from external resources. This is where the `prefer_global_in_and_join` setting comes into play.
For example, we have query serving nodes that contain local tables, which are not suitable for distribution. We need to scatter their data on the fly during distributed processing with the `GLOBAL` keyword — `GLOBAL IN`/`GLOBAL JOIN`.
Another use case of `prefer_global_in_and_join` is accessing tables created by external engines. This setting helps to reduce the number of calls to external sources while joining such tables: only one call per query.
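For example, enabling the setting for a session rewrites subsequent operators as described (a minimal sketch; `dist_table`, `local_table`, and the `id` column are hypothetical names):
```sql
SET prefer_global_in_and_join = 1;
-- executed as if GLOBAL JOIN had been written explicitly
SELECT count() FROM dist_table AS d INNER JOIN local_table AS l ON d.id = l.id;
```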
**See also:**
- [Distributed subqueries](../../sql-reference/operators/in.md#select-distributed-subqueries) for more information on how to use `GLOBAL IN`/`GLOBAL JOIN`
## enable_optimize_predicate_expression {#enable-optimize-predicate-expression}
Turns on predicate pushdown in `SELECT` queries.
@ -542,7 +565,7 @@ Possible values:
Default value: `hash`.
When using the `hash` algorithm, the right part of `JOIN` is uploaded into RAM.
When using the `partial_merge` algorithm, ClickHouse sorts the data and dumps it to disk. The `merge` algorithm in ClickHouse differs a bit from the classic implementation. First, ClickHouse sorts the right table by the [join key](../../sql-reference/statements/select/join.md#select-join) in blocks and creates a min-max index for the sorted blocks. Then it sorts parts of the left table by the `join key` and joins them over the right table. The min-max index is also used to skip unneeded right-table blocks.
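For instance, switching a session to the on-disk algorithm described above (a minimal sketch):
```sql
SET join_algorithm = 'partial_merge';
```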
@ -1251,7 +1274,7 @@ Default value: `3`.
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations.
Possible values:
@ -2927,7 +2950,7 @@ Result:
└─────────────┘
```
Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) behaviour.
## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
@ -3209,7 +3232,7 @@ Default value: `300`.
## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}
Sets the timeout for DDL query responses from all hosts in the cluster. If a DDL request has not been performed on all hosts, the response will contain a timeout error and the request will be executed in async mode. A negative value means infinite.
Possible values:


@ -33,7 +33,7 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
**See Also**
- [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics, calculated periodically in the background.
- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) <!--hide-->


@ -4,7 +4,7 @@ Contains information about columns in all the tables.
You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.
Columns from [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.columns` only in those sessions where they have been created. They are shown with an empty `database` field.
Columns:
@ -38,17 +38,17 @@ database: system
table: aggregate_function_combinators
name: name
type: String
default_kind:
default_expression:
data_compressed_bytes: 0
data_uncompressed_bytes: 0
marks_bytes: 0
comment:
is_in_partition_key: 0
is_in_sorting_key: 0
is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
Row 2:
──────
@ -56,17 +56,17 @@ database: system
table: aggregate_function_combinators
name: is_internal
type: UInt8
default_kind:
default_expression:
data_compressed_bytes: 0
data_uncompressed_bytes: 0
marks_bytes: 0
comment:
is_in_partition_key: 0
is_in_sorting_key: 0
is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
```
The `system.columns` table contains the following columns (the column type is shown in brackets):


@ -21,7 +21,7 @@ Columns:
│ default │ /var/lib/clickhouse/ │ 276392587264 │ 490652508160 │ 0 │
└─────────┴──────────────────────┴──────────────┴──────────────┴─────────────────┘
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) <!--hide-->


@ -62,4 +62,3 @@ exception_code: ZOK
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/distributed_ddl_queuedistributed_ddl_queue.md) <!--hide-->


@ -40,7 +40,7 @@ is_blocked: 1
error_count: 0
data_files: 1
data_compressed_bytes: 499
last_exception:
```
**See Also**


@ -1,6 +1,6 @@
# system.enabled_roles {#system_tables-enabled_roles}
Contains all active roles at the moment, including the current role of the current user and the roles granted to the current role.
Columns:


@ -27,7 +27,7 @@ Columns:
│ JSONExtractInt │ 0 │ 0 │ │
└──────────────────────────┴──────────────┴──────────────────┴──────────┘
10 rows in set. Elapsed: 0.002 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) <!--hide-->


@ -1,11 +1,11 @@
# system.licenses {#system-tables_system.licenses}
Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources. A sample query follows the column list.
Columns:
- `library_name` ([String](../../sql-reference/data-types/string.md)) — Name of the library the license is connected with.
- `license_type` ([String](../../sql-reference/data-types/string.md)) — License type — e.g. Apache, MIT.
- `license_path` ([String](../../sql-reference/data-types/string.md)) — Path to the file with the license text.
- `license_text` ([String](../../sql-reference/data-types/string.md)) — License text.
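For example, a quick look at the collected license metadata (a minimal sketch; the output depends on the contrib libraries bundled in the build):
```sql
SELECT library_name, license_type, license_path
FROM system.licenses
LIMIT 5;
```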


@ -48,7 +48,7 @@ changed: 0
description: How many rows in blocks should be formed for merge operations.
type: SettingUInt64
4 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) <!--hide-->


@ -1,6 +1,6 @@
# system.mutations {#system_tables-mutations}
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
Columns:
@ -16,17 +16,17 @@ Columns:
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the mutation is done. Possible values:
    - `1` if the mutation is completed,
    - `0` if the mutation is still in process.
!!! info "Note"
    Even if `parts_to_do = 0`, it is possible that a mutation of a replicated table is not completed yet because of a long-running `INSERT` query that will create a new data part that needs to be mutated.
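As an illustration, a query like the following can be used to watch mutations that are still running. It is a sketch: besides the columns described above it relies on `database`, `table`, `mutation_id` and `latest_fail_reason`, which belong to the fuller description of this table:

``` sql
SELECT database, table, mutation_id, parts_to_do, is_done, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```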

@ -26,7 +26,7 @@ Reads from this table are not parallelized.
│ 9 │
└────────┘
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) <!--hide-->

@ -24,7 +24,7 @@ Used for tests.
│ 9 │
└────────┘
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) <!--hide-->

@ -17,7 +17,7 @@ This is similar to the `DUAL` table found in other DBMSs.
│ 0 │
└───────┘
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) <!--hide-->

@ -63,7 +63,7 @@ read_rows: 0
read_bytes: 0
peak_memory_usage: 0
error: 0
exception:
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) <!--hide-->

@ -19,10 +19,10 @@ Columns:
Possible Values:
- `Wide` — Each column is stored in a separate file in a filesystem.
- `Compact` — All columns are stored in one file in a filesystem.
Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.
- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging.
@ -88,7 +88,7 @@ Columns:
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
!!! note "Warning"
    The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields, as in the sketch below.
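Following that note, a minimal sketch of inspecting the `TTL MOVE` boundaries of active parts through `move_ttl_info.min` and `move_ttl_info.max`; the `mydb.mytable` names are placeholders:

``` sql
SELECT name, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max
FROM system.parts
WHERE database = 'mydb' AND table = 'mytable' AND active;
```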

@ -19,10 +19,10 @@ Columns:
Possible values:
- `Wide` — Each column is stored in a separate file in a filesystem.
- `Compact` — All columns are stored in one file in a filesystem.
Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.
- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging.

@ -51,6 +51,7 @@ Columns:
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the databases present in the query.
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the tables present in the query.
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the columns present in the query.
- `projections` ([String](../../sql-reference/data-types/string.md)) — Names of the projections used during the query execution.
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Exception message.
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.
@ -65,6 +66,8 @@ Columns:
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution).
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution).
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
@ -101,55 +104,77 @@ Columns:
**Example**
``` sql
SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
```
``` text
Row 1:
──────
type: QueryFinish
event_date: 2021-07-28
event_time: 2021-07-28 13:46:56
event_time_microseconds: 2021-07-28 13:46:56.719791
query_start_time: 2021-07-28 13:46:56
query_start_time_microseconds: 2021-07-28 13:46:56.704542
query_duration_ms: 14
read_rows: 8393
read_bytes: 374325
written_rows: 0
written_bytes: 0
result_rows: 4201
result_bytes: 153024
memory_usage: 4714038
current_database: default
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
normalized_query_hash: 6666026786019643712
query_kind: Select
databases: ['system']
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
projections: []
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
address: ::ffff:127.0.0.1
port: 51006
initial_user: default
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
initial_address: ::ffff:127.0.0.1
initial_port: 51006
initial_query_start_time: 2021-07-28 13:46:56
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
interface: 1
os_user:
client_hostname:
client_name: ClickHouse client
client_revision: 54449
client_version_major: 21
client_version_minor: 8
client_version_patch: 0
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54453
log_comment:
thread_ids: [5058,22097,22110,22094]
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
used_aggregate_functions: []
used_aggregate_function_combinators: []
used_database_engines: []
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
used_dictionaries: []
used_formats: []
used_functions: ['concat','notEmpty','extractAll']
used_storages: []
used_table_functions: []
```
**See Also**

@ -13,9 +13,9 @@ Columns:
- `granted_role_is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a default role. Possible values:
    - 1 — `granted_role` is a default role.
    - 0 — `granted_role` is not a default role.
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a role with [ADMIN OPTION](../../sql-reference/statements/grant.md#admin-option-privilege) privilege. Possible values:
    - 1 — The role has `ADMIN OPTION` privilege.
    - 0 — The role does not have `ADMIN OPTION` privilege.
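For example, a sketch of listing grants that carry the admin option; it assumes the `user_name` and `granted_role_name` columns, which belong to the fuller description of this table:

``` sql
SELECT user_name, granted_role_name, granted_role_is_default, with_admin_option
FROM system.role_grants
WHERE with_admin_option = 1;
```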
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) <!--hide-->

@ -13,7 +13,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Row policy ID.
- `storage` ([String](../../sql-reference/data-types/string.md)) — Name of the directory where the row policy is stored.
- `select_filter` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Condition which is used to filter rows.
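A quick way to inspect the stored policies is a plain `SELECT`; this is a sketch, and the result depends on which row policies are defined on the server:

``` sql
SELECT * FROM system.row_policies LIMIT 2 FORMAT Vertical;
```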

@ -7,7 +7,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Setting profile ID.
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of setting profiles. Configured in the `access_control_path` parameter.
- `num_elements` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of elements for this profile in the `system.settings_profile_elements` table.

@ -1,24 +1,24 @@
# system.tables {#system-tables}
Contains metadata of each table that the server knows about.
[Detached](../../sql-reference/statements/detach.md) tables are not shown in `system.tables`.
[Temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.tables` only in the sessions where they have been created. They are shown with an empty `database` field and with the `is_temporary` flag switched on.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
- `name` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `engine` ([String](../../sql-reference/data-types/string.md)) — Table engine name (without parameters).
- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag that indicates whether the table is temporary.
- `data_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table data in the file system.
- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.
- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata.
@ -28,33 +28,33 @@ Columns:
- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table.
- `engine_full` ([String](../../sql-reference/data-types/string.md)) - Parameters of the table engine.
- `partition_key` ([String](../../sql-reference/data-types/string.md)) - The partition key expression specified in the table.
- `sorting_key` ([String](../../sql-reference/data-types/string.md)) - The sorting key expression specified in the table.
- `primary_key` ([String](../../sql-reference/data-types/string.md)) - The primary key expression specified in the table.
- `sampling_key` ([String](../../sql-reference/data-types/string.md)) - The sampling key expression specified in the table.
- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - The storage policy:
    - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
    - [Distributed](../../engines/table-engines/special/distributed.md#distributed)
- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `NULL` (including the underlying `Buffer` table).
- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table on storage, otherwise `NULL` (does not include any underlying storage).
    - If the table stores data on disk, returns used space on disk (i.e. compressed).
    - If the table stores data in memory, returns the approximate number of used bytes in memory.
- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables).
- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables).
- `comment` ([String](../../sql-reference/data-types/string.md)) - The comment for the table.
The `system.tables` table is used in the `SHOW TABLES` query implementation.
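As a sketch, a query restricted to the columns above gives a quick size overview of the tables in a database; `system` is used here as a placeholder database name:

``` sql
SELECT database, name, engine, total_rows, total_bytes
FROM system.tables
WHERE database = 'system'
ORDER BY name
LIMIT 5;
```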

@ -42,12 +42,12 @@ microseconds: 871397
thread_name: clickhouse-serv
thread_id: 564917
level: Information
query_id:
logger_name: DNSCacheUpdater
message: Update period 15 seconds
revision: 54440
source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()
source_line: 45
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/text_log) <!--hide-->

@ -49,7 +49,7 @@ timestamp_ns: 1599762189872924510
revision: 54440
trace_type: Memory
thread_id: 564963
query_id:
trace: [371912858,371912789,371798468,371799717,371801313,371790250,624462773,566365041,566440261,566445834,566460071,566459914,566459842,566459580,566459469,566459389,566459341,566455774,371993941,371988245,372158848,372187428,372187309,372187093,372185478,140222123165193,140222122205443]
size: 5244400
```

@ -7,7 +7,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — User ID.
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
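For illustration, a minimal sketch that lists the configured users using only the columns described above; the output depends on the access entities defined on the server:

``` sql
SELECT id, storage, auth_type FROM system.users LIMIT 2 FORMAT Vertical;
```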

@ -16,12 +16,12 @@ $ sudo service clickhouse-server restart
If you installed ClickHouse using something other than the recommended `deb` packages, use the appropriate update method.
!!! note "Note"
    You can update multiple servers at once, as long as there is no moment when all replicas of one shard are offline.
To upgrade an older version of ClickHouse to a specific version:
As an example:
`xx.yy.a.b` is the current stable version. The latest stable version can be found [here](https://github.com/ClickHouse/ClickHouse/releases)
```bash

@ -40,7 +40,7 @@ clickhouse-benchmark [keys] < queries_file;
## Keys {#clickhouse-benchmark-keys}
- `--query=QUERY` — Query to execute. If this parameter is not passed, `clickhouse-benchmark` will read queries from standard input.
- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
- `-h HOST`, `--host=HOST` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.

@ -74,7 +74,7 @@ Parameters:
source cluster & destination clusters accept exactly the same
parameters as parameters for the usual Distributed table
see https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
-->
<shard>
<internal_replication>false</internal_replication>
<replica>

@ -18,7 +18,7 @@ Keys:
- `--seed <string>` — Seed arbitrary string that determines the result of obfuscation.
- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a query from the web or somewhere else as multiple lines and want to execute it on the command line.
## Examples {#examples}
1. Highlighting and single line:
@ -32,12 +32,12 @@ Result:
SELECT sum(number) FROM numbers(5)
```
2. Multiqueries:
```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```
Result:
```text
@ -58,13 +58,13 @@ FROM
```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```
Result:
```text
SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
```
Same query and another seed string:
```bash
@ -95,4 +95,4 @@ FROM \
UNION DISTINCT \
SELECT 3 \
)
```

@ -38,7 +38,7 @@ Arguments:
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `-d`, `--database` — default database, `_local` by default.
- `--stacktrace` — whether to dump debug output in case of exception.
- `--echo` — print query before execution.
- `--verbose` — more details on query execution.
- `--logger.console` — Log to console.
- `--logger.log` — Log file name.

@ -255,7 +255,7 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
- `window` — Length of the sliding window; it is the time interval between the first and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` — An optional argument. One or more modes can be set.
    - `'strict'` — If the same condition holds for a sequence of events, such non-unique events are skipped.
    - `'strict_order'` — Don't allow interventions of other events. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
    - `'strict_increase'` — Apply conditions only to events with strictly increasing timestamps.
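To make the parameters concrete, here is a sketch of a typical funnel query; the `events` table and its `user_id`, `event_time` and `event` columns are hypothetical, and the one-hour window and `'strict_order'` mode are arbitrary choices:

``` sql
SELECT
    level,
    count() AS users
FROM
(
    -- Compute the deepest funnel level reached by each user within a 1-hour window.
    SELECT
        user_id,
        windowFunnel(3600, 'strict_order')(event_time, event = 'view', event = 'cart', event = 'purchase') AS level
    FROM events
    GROUP BY user_id
)
GROUP BY level
ORDER BY level;
```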
@ -530,7 +530,7 @@ sequenceNextNode(direction, base)(timestamp, event_column, base_condition, event
- tail — Set the base point to the last event.
- first_match — Set the base point to the first matched `event1`.
- last_match — Set the base point to the last matched `event1`.
**Arguments**
- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types.
@ -553,11 +553,11 @@ The query statement searching the event following A->B:
``` sql
CREATE TABLE test_flow (
    dt DateTime,
    id int,
    page String)
ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(dt)
ORDER BY id;
INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'D') (5, 1, 'E');
@ -585,21 +585,21 @@ INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3,
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home // Base point, Matched with Home
1970-01-01 09:00:02 1 Gift // Matched with Gift
1970-01-01 09:00:03 1 Exit // The result
1970-01-01 09:00:01 2 Home // Base point, Matched with Home
1970-01-01 09:00:02 2 Home // Unmatched with Gift
1970-01-01 09:00:03 2 Gift
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift // Base point, Unmatched with Home
1970-01-01 09:00:02 3 Home
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```
**Behavior for `backward` and `tail`**
@ -611,14 +611,14 @@ SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift
1970-01-01 09:00:03 1 Exit // Base point, Unmatched with Basket
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // The result
1970-01-01 09:00:03 2 Gift // Matched with Gift
1970-01-01 09:00:04 2 Basket // Base point, Matched with Basket
1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift // Base point, Matched with Gift
1970-01-01 09:00:04 3 Basket // Base point, Matched with Basket
```
@ -633,16 +633,16 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit // The result
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket The result
1970-01-01 09:00:01 3 Gift // Base point
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```
``` sql
@ -652,16 +652,16 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit // Unmatched with Home
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket // Unmatched with Home
1970-01-01 09:00:01 3 Gift // Base point
1970-01-01 09:00:02 3 Home // Matched with Home
1970-01-01 09:00:03 3 Gift // The result
1970-01-01 09:00:04 3 Basket
```
@ -673,17 +673,17 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
dt id page
1970-01-01 09:00:01 1 Home // The result
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // The result
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift // Base point
1970-01-01 09:00:04 3 Basket
```
``` sql
@ -692,17 +692,17 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
dt id page
1970-01-01 09:00:01 1 Home // Matched with Home, the result is null
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit
1970-01-01 09:00:01 2 Home // The result
1970-01-01 09:00:02 2 Home // Matched with Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift // The result
1970-01-01 09:00:02 3 Home // Matched with Home
1970-01-01 09:00:03 3 Gift // Base point
1970-01-01 09:00:04 3 Basket
```
@ -726,39 +726,39 @@ INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4 // The head cannot be the base point because the ref column of the head does not match 'ref1'.
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1
```
``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1 // The tail cannot be the base point because the ref column of the tail does not match 'ref4'.
```
``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4 // This row cannot be the base point because the ref column does not match 'ref3'.
1970-01-01 09:00:02 1 A ref3 // Base point
1970-01-01 09:00:03 1 B ref2 // The result
1970-01-01 09:00:04 1 B ref1
```
``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3 // The result
1970-01-01 09:00:03 1 B ref2 // Base point
1970-01-01 09:00:04 1 B ref1 // This row cannot be the base point because the ref column does not match 'ref2'.
```

@ -47,7 +47,7 @@ Query:
CREATE table test (t UInt8) ENGINE = Memory;
```
Get the arithmetic mean:
Query:
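As a sketch of the step announced above, the `test` table can be populated and the mean computed like this; the inserted values are illustrative, and with them `avg(t)` returns 7/3 ≈ 2.33:

``` sql
INSERT INTO test (t) VALUES (1), (2), (4);

SELECT avg(t) FROM test;
```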

@ -19,7 +19,7 @@ avgWeighted(x, weight)
`x` and `weight` must both be
[Integer](../../../sql-reference/data-types/int-uint.md),
[floating-point](../../../sql-reference/data-types/float.md), or
[Decimal](../../../sql-reference/data-types/decimal.md),
but may have different types.
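As a sketch of that rule, integer values can be weighted by floating-point weights; this example assumes the `values` table function and yields `8` for the data shown:

``` sql
-- Weighted mean: (4*1 + 1*0 + 10*2) / (1 + 0 + 2) = 8
SELECT avgWeighted(x, w)
FROM values('x Int8, w Float64', (4, 1), (1, 0), (10, 2));
```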

@ -32,7 +32,7 @@ Type: [Integer](../../data-types/int-uint.md) or [Float](../../data-types/float.
Query:
```sql
SELECT deltaSumTimestamp(value, timestamp)
FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10));
```

@ -4,7 +4,7 @@ toc_priority: 114
# groupArraySample {#grouparraysample}
Creates an array of sample argument values. The size of the resulting array is limited to `max_size` elements. Argument values are selected and added to the array randomly.
**Syntax**
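As a sketch of the behavior, the following query draws 3 of the 10 generated numbers at random, so the contents of the returned array differ between runs:

``` sql
SELECT groupArraySample(3)(number) FROM numbers(10);
```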
