mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00
Merge
This commit is contained in:
commit
139a19d583
1
.gitignore
vendored
1
.gitignore
vendored
@ -79,6 +79,7 @@ configure-stamp
|
||||
*.bin
|
||||
*.mrk
|
||||
*.mrk2
|
||||
*.mrk3
|
||||
|
||||
.dupload.conf
|
||||
|
||||
|
6
.gitmodules
vendored
6
.gitmodules
vendored
@ -91,10 +91,10 @@
|
||||
url = https://github.com/ClickHouse-Extras/libunwind.git
|
||||
[submodule "contrib/simdjson"]
|
||||
path = contrib/simdjson
|
||||
url = https://github.com/lemire/simdjson.git
|
||||
url = https://github.com/ClickHouse-Extras/simdjson.git
|
||||
[submodule "contrib/rapidjson"]
|
||||
path = contrib/rapidjson
|
||||
url = https://github.com/Tencent/rapidjson
|
||||
url = https://github.com/ClickHouse-Extras/rapidjson
|
||||
[submodule "contrib/fastops"]
|
||||
path = contrib/fastops
|
||||
url = https://github.com/ClickHouse-Extras/fastops
|
||||
@ -173,7 +173,7 @@
|
||||
url = https://github.com/fmtlib/fmt.git
|
||||
[submodule "contrib/sentry-native"]
|
||||
path = contrib/sentry-native
|
||||
url = https://github.com/getsentry/sentry-native.git
|
||||
url = https://github.com/ClickHouse-Extras/sentry-native.git
|
||||
[submodule "contrib/gcem"]
|
||||
path = contrib/gcem
|
||||
url = https://github.com/kthohr/gcem.git
|
||||
|
@ -176,10 +176,7 @@ if(NOT DISABLE_CPU_OPTIMIZE)
|
||||
include(cmake/cpu_features.cmake)
|
||||
endif()
|
||||
|
||||
if(NOT COMPILER_CLANG) # clang: error: the clang compiler does not support '-march=native'
|
||||
option(ARCH_NATIVE "Enable -march=native compiler flag" ${ARCH_ARM})
|
||||
endif()
|
||||
|
||||
option(ARCH_NATIVE "Enable -march=native compiler flag" 0)
|
||||
if (ARCH_NATIVE)
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
|
||||
endif ()
|
||||
|
@ -10,6 +10,7 @@ ClickHouse is an open-source column-oriented database management system that all
|
||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
|
||||
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
|
||||
* [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
|
||||
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
|
||||
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
|
||||
|
@ -404,7 +404,7 @@ public:
|
||||
a date at start of january) In this case one can get 53 for the
|
||||
first week of next year. This flag ensures that the week is
|
||||
relevant for the given year. Note that this flag is only
|
||||
releveant if WeekModeFlag::JANUARY is not set.
|
||||
relevant if WeekModeFlag::JANUARY is not set.
|
||||
|
||||
If set Week is in range 1-53.
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -51,6 +51,7 @@
|
||||
#include <Common/getMultipleKeysFromConfig.h>
|
||||
#include <Common/ClickHouseRevision.h>
|
||||
#include <Common/Config/ConfigProcessor.h>
|
||||
#include <Common/MemorySanitizer.h>
|
||||
#include <Common/SymbolIndex.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
@ -76,6 +77,15 @@ static void call_default_signal_handler(int sig)
|
||||
raise(sig);
|
||||
}
|
||||
|
||||
const char * msan_strsignal(int sig)
|
||||
{
|
||||
// Apparently strsignal is not instrumented by MemorySanitizer, so we
|
||||
// have to unpoison it to avoid msan reports inside fmt library when we
|
||||
// print it.
|
||||
const char * signal_name = strsignal(sig);
|
||||
__msan_unpoison_string(signal_name);
|
||||
return signal_name;
|
||||
}
|
||||
|
||||
static constexpr size_t max_query_id_size = 127;
|
||||
|
||||
@ -280,12 +290,14 @@ private:
|
||||
if (query_id.empty())
|
||||
{
|
||||
LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})",
|
||||
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, strsignal(sig), sig);
|
||||
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
|
||||
thread_num, msan_strsignal(sig), sig);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
|
||||
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, query_id, strsignal(sig), sig);
|
||||
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
|
||||
thread_num, query_id, msan_strsignal(sig), sig);
|
||||
}
|
||||
|
||||
String error_message;
|
||||
@ -833,13 +845,13 @@ void BaseDaemon::handleSignal(int signal_id)
|
||||
onInterruptSignals(signal_id);
|
||||
}
|
||||
else
|
||||
throw DB::Exception(std::string("Unsupported signal: ") + strsignal(signal_id), 0);
|
||||
throw DB::Exception(std::string("Unsupported signal: ") + msan_strsignal(signal_id), 0);
|
||||
}
|
||||
|
||||
void BaseDaemon::onInterruptSignals(int signal_id)
|
||||
{
|
||||
is_cancelled = true;
|
||||
LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id));
|
||||
LOG_INFO(&logger(), "Received termination signal ({})", msan_strsignal(signal_id));
|
||||
|
||||
if (sigint_signals_counter >= 2)
|
||||
{
|
||||
|
@ -29,7 +29,7 @@
|
||||
namespace Poco { class TaskManager; }
|
||||
|
||||
|
||||
/// \brief Base class for applications that can run as deamons.
|
||||
/// \brief Base class for applications that can run as daemons.
|
||||
///
|
||||
/// \code
|
||||
/// # Some possible command line options:
|
||||
|
@ -46,7 +46,7 @@ void setExtras()
|
||||
sentry_set_extra("version_patch", sentry_value_new_int32(VERSION_PATCH));
|
||||
}
|
||||
|
||||
void sentry_logger(sentry_level_t level, const char * message, va_list args)
|
||||
void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
|
||||
{
|
||||
auto * logger = &Poco::Logger::get("SentryWriter");
|
||||
size_t size = 1024;
|
||||
@ -107,7 +107,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
|
||||
|
||||
sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
|
||||
sentry_options_set_release(options, VERSION_STRING_SHORT);
|
||||
sentry_options_set_logger(options, &sentry_logger);
|
||||
sentry_options_set_logger(options, &sentry_logger, nullptr);
|
||||
if (debug)
|
||||
{
|
||||
sentry_options_set_debug(options, 1);
|
||||
|
@ -26,12 +26,12 @@ namespace ext
|
||||
}
|
||||
|
||||
template <typename Rep, typename Period = std::ratio<1>>
|
||||
std::string to_string(const std::chrono::duration<Rep, Period> & dur)
|
||||
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
|
||||
{
|
||||
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(dur);
|
||||
if (seconds_as_int == dur)
|
||||
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
|
||||
if (seconds_as_int == duration)
|
||||
return std::to_string(seconds_as_int.count()) + "s";
|
||||
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(dur);
|
||||
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
|
||||
return std::to_string(seconds_as_double.count()) + "s";
|
||||
}
|
||||
|
||||
@ -42,8 +42,8 @@ namespace ext
|
||||
}
|
||||
|
||||
template <typename Rep, typename Period = std::ratio<1>>
|
||||
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & dur)
|
||||
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
|
||||
{
|
||||
return o << to_string(dur);
|
||||
return o << to_string(duration);
|
||||
}
|
||||
}
|
||||
|
@ -24,7 +24,7 @@
|
||||
* = log(6.3*5.3) + lgamma(5.3)
|
||||
* = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
|
||||
* 2. Polynomial approximation of lgamma around its
|
||||
* minimun ymin=1.461632144968362245 to maintain monotonicity.
|
||||
* minimum ymin=1.461632144968362245 to maintain monotonicity.
|
||||
* On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
|
||||
* Let z = x-ymin;
|
||||
* lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
|
||||
|
@ -21,7 +21,7 @@ public:
|
||||
|
||||
std::optional<size_t> getLayer() const
|
||||
{
|
||||
return layer; /// layer setted in inheritor class BaseDaemonApplication.
|
||||
return layer; /// layer set in inheritor class BaseDaemonApplication.
|
||||
}
|
||||
|
||||
void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
|
||||
|
@ -91,10 +91,13 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
|
||||
elem.source_file = msg.getSourceFile();
|
||||
|
||||
elem.source_line = msg.getSourceLine();
|
||||
|
||||
std::lock_guard<std::mutex> lock(text_log_mutex);
|
||||
if (auto log = text_log.lock())
|
||||
log->add(elem);
|
||||
std::shared_ptr<TextLog> text_log_locked{};
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(text_log_mutex);
|
||||
text_log_locked = text_log.lock();
|
||||
}
|
||||
if (text_log_locked)
|
||||
text_log_locked->add(elem);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -53,8 +53,6 @@
|
||||
* These assumptions are specific for Yandex.Metrica. Your mileage may vary.
|
||||
*
|
||||
* mysqlxx could not be considered as separate full-featured library,
|
||||
* because it is developed from the principle - "everything that we don't need is not implemented",
|
||||
* and also the library depends on some other libraries from Yandex.Metrica code.
|
||||
* (dependencied could be easily removed if necessary).
|
||||
* because it is developed from the principle - "everything that we don't need is not implemented".
|
||||
* It is assumed that the user will add all missing functionality that is needed.
|
||||
*/
|
||||
|
@ -110,7 +110,7 @@ namespace pcg_extras {
|
||||
/*
|
||||
* C++ requires us to be able to serialize RNG state by printing or reading
|
||||
* it from a stream. Because we use 128-bit ints, we also need to be able
|
||||
* ot print them, so here is code to do so.
|
||||
* or print them, so here is code to do so.
|
||||
*
|
||||
* This code provides enough functionality to print 128-bit ints in decimal
|
||||
* and zero-padded in hex. It's not a full-featured implementation.
|
||||
@ -253,7 +253,7 @@ inline std::istream& operator>>(std::istream& in, uint8_t& value)
|
||||
*/
|
||||
|
||||
/*
|
||||
* XorShifts are invertable, but they are someting of a pain to invert.
|
||||
* XorShifts are invertable, but they are something of a pain to invert.
|
||||
* This function backs them out. It's used by the whacky "inside out"
|
||||
* generator defined later.
|
||||
*/
|
||||
|
@ -174,7 +174,7 @@ PCG_DEFINE_CONSTANT(pcg128_t, default, increment,
|
||||
* period
|
||||
* specific stream - the constant can be changed at any time, selecting
|
||||
* a different random sequence
|
||||
* unique stream - the constant is based on the memory addresss of the
|
||||
* unique stream - the constant is based on the memory address of the
|
||||
* object, thus every RNG has its own unique sequence
|
||||
*
|
||||
* This variation is provided though mixin classes which define a function
|
||||
@ -352,7 +352,7 @@ protected:
|
||||
* (reducing register pressure).
|
||||
*
|
||||
* Given the high level of parameterization, the code has to use some
|
||||
* template-metaprogramming tricks to handle some of the suble variations
|
||||
* template-metaprogramming tricks to handle some of the subtle variations
|
||||
* involved.
|
||||
*/
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
# This strings autochanged from release_lib.sh:
|
||||
SET(VERSION_REVISION 54437)
|
||||
SET(VERSION_REVISION 54438)
|
||||
SET(VERSION_MAJOR 20)
|
||||
SET(VERSION_MINOR 7)
|
||||
SET(VERSION_MINOR 8)
|
||||
SET(VERSION_PATCH 1)
|
||||
SET(VERSION_GITHASH d64e51d1a78c1b53c33915ca0f75c97b2333844f)
|
||||
SET(VERSION_DESCRIBE v20.7.1.1-prestable)
|
||||
SET(VERSION_STRING 20.7.1.1)
|
||||
SET(VERSION_GITHASH 5d60ab33a511efd149c7c3de77c0dd4b81e65b13)
|
||||
SET(VERSION_DESCRIBE v20.8.1.1-prestable)
|
||||
SET(VERSION_STRING 20.8.1.1)
|
||||
# end of autochange
|
||||
|
@ -20,6 +20,12 @@ endif ()
|
||||
|
||||
option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON)
|
||||
|
||||
# Control maximum size of stack frames. It can be important if the code is run in fibers with small stack size.
|
||||
# Only in release build because debug has too large stack frames.
|
||||
if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE))
|
||||
add_warning(frame-larger-than=16384)
|
||||
endif ()
|
||||
|
||||
if (COMPILER_CLANG)
|
||||
add_warning(pedantic)
|
||||
no_warning(vla-extension)
|
||||
|
2
contrib/rapidjson
vendored
2
contrib/rapidjson
vendored
@ -1 +1 @@
|
||||
Subproject commit 8f4c021fa2f1e001d2376095928fc0532adf2ae6
|
||||
Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa
|
2
contrib/sentry-native
vendored
2
contrib/sentry-native
vendored
@ -1 +1 @@
|
||||
Subproject commit f91ed3f95b5653f247189d720ab00765b4899d6f
|
||||
Subproject commit 94644e92f0a3ff14bd35ed902a8622a2d15f7be4
|
2
contrib/simdjson
vendored
2
contrib/simdjson
vendored
@ -1 +1 @@
|
||||
Subproject commit 1e4aa116e5a39e4ba23b9a93e6c7f048c5105b20
|
||||
Subproject commit 3190d66a49059092a1753dc35595923debfc1698
|
4
debian/changelog
vendored
4
debian/changelog
vendored
@ -1,5 +1,5 @@
|
||||
clickhouse (20.7.1.1) unstable; urgency=low
|
||||
clickhouse (20.8.1.1) unstable; urgency=low
|
||||
|
||||
* Modified source code
|
||||
|
||||
-- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 13 Jul 2020 18:25:58 +0300
|
||||
-- clickhouse-release <clickhouse-release@yandex-team.ru> Fri, 07 Aug 2020 21:45:46 +0300
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
|
||||
ARG version=20.7.1.*
|
||||
ARG version=20.8.1.*
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends \
|
||||
|
@ -21,7 +21,7 @@ RUN apt-get --allow-unauthenticated update -y \
|
||||
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
|
||||
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
|
||||
# Significantly increase deb packaging speed and compatible with old systems
|
||||
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb
|
||||
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb
|
||||
RUN chmod +x dpkg-deb
|
||||
RUN cp dpkg-deb /usr/bin
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
|
||||
ARG version=20.7.1.*
|
||||
ARG version=20.8.1.*
|
||||
ARG gosu_ver=1.10
|
||||
|
||||
RUN apt-get update \
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
|
||||
ARG version=20.7.1.*
|
||||
ARG version=20.8.1.*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y apt-transport-https dirmngr && \
|
||||
|
@ -53,7 +53,7 @@ mkdir -p /etc/clickhouse-server
|
||||
mkdir -p /etc/clickhouse-client
|
||||
mkdir -p /etc/clickhouse-server/config.d
|
||||
mkdir -p /etc/clickhouse-server/users.d
|
||||
mkdir -p /var/log/clickhouse-server
|
||||
ln -s /test_output /var/log/clickhouse-server
|
||||
cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
|
||||
cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/
|
||||
|
||||
@ -66,7 +66,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
@ -84,6 +83,10 @@ ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
# Keep original query_masking_rules.xml
|
||||
ln -s --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
|
||||
clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
|
||||
|
||||
counter=0
|
||||
@ -161,15 +164,15 @@ clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip ${TESTS_TO_
|
||||
|
||||
|
||||
kill_clickhouse () {
|
||||
kill `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` 2>/dev/null
|
||||
killall clickhouse-server ||:
|
||||
|
||||
for i in {1..10}
|
||||
do
|
||||
if ! kill -0 `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'`; then
|
||||
if ! killall -0 clickhouse-server; then
|
||||
echo "No clickhouse process"
|
||||
break
|
||||
else
|
||||
echo "Process" `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` "still alive"
|
||||
echo "Clickhouse server process" $(pgrep -f clickhouse-server) "still alive"
|
||||
sleep 10
|
||||
fi
|
||||
done
|
||||
@ -202,5 +205,3 @@ if [[ ! -z "$FAILED_TESTS" ]]; then
|
||||
else
|
||||
echo "No failed tests"
|
||||
fi
|
||||
|
||||
mv /var/log/clickhouse-server/* /test_output
|
||||
|
@ -514,16 +514,20 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
|
||||
;
|
||||
|
||||
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') as
|
||||
select
|
||||
toDecimal64(left, 3), toDecimal64(right, 3),
|
||||
with
|
||||
-- server_time is sometimes reported as zero (if it's less than 1 ms),
|
||||
-- so we have to work around this to not get an error about conversion
|
||||
-- of NaN to decimal.
|
||||
left > right
|
||||
? '- ' || toString(toDecimal64(left / (right + 0.001), 3)) || 'x'
|
||||
: '+ ' || toString(toDecimal64(right / (left + 0.001), 3)) || 'x',
|
||||
toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
|
||||
changed_fail, test, query_index, query_display_name
|
||||
(left > right ? left / right : right / left) as times_change_float,
|
||||
isFinite(times_change_float) as times_change_finite,
|
||||
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
times_change_finite
|
||||
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
|
||||
: '--' as times_change_str
|
||||
select
|
||||
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
|
||||
toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
|
||||
changed_fail, test, query_index, query_display_name
|
||||
from queries where changed_show order by abs(diff) desc;
|
||||
|
||||
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv') as
|
||||
@ -603,11 +607,18 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as
|
||||
|
||||
-- report for all queries page, only main metric
|
||||
create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
|
||||
with
|
||||
-- server_time is sometimes reported as zero (if it's less than 1 ms),
|
||||
-- so we have to work around this to not get an error about conversion
|
||||
-- of NaN to decimal.
|
||||
(left > right ? left / right : right / left) as times_change_float,
|
||||
isFinite(times_change_float) as times_change_finite,
|
||||
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
times_change_finite
|
||||
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
|
||||
: '--' as times_change_str
|
||||
select changed_fail, unstable_fail,
|
||||
toDecimal64(left, 3), toDecimal64(right, 3),
|
||||
left > right
|
||||
? '- ' || toString(toDecimal64(left / (right + 0.001), 3)) || 'x'
|
||||
: '+ ' || toString(toDecimal64(right / (left + 0.001), 3)) || 'x',
|
||||
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
|
||||
toDecimal64(isFinite(diff) ? diff : 0, 3),
|
||||
toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3),
|
||||
test, query_index, query_display_name
|
||||
|
@ -14,7 +14,7 @@ function find_reference_sha
|
||||
# master, the merge base is the tested commit itself, so we have to step back
|
||||
# once.
|
||||
start_ref=$(git -C right/ch merge-base origin/master pr)
|
||||
if [ "PR_TO_TEST" == "0" ]
|
||||
if [ "$PR_TO_TEST" == "0" ]
|
||||
then
|
||||
start_ref=$start_ref~
|
||||
fi
|
||||
|
@ -17,6 +17,8 @@ parser.add_argument('--report', default='main', choices=['main', 'all-queries'],
|
||||
help='Which report to build')
|
||||
args = parser.parse_args()
|
||||
|
||||
tables = []
|
||||
errors_explained = []
|
||||
report_errors = []
|
||||
error_tests = 0
|
||||
slow_average_tests = 0
|
||||
@ -145,21 +147,42 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
|
||||
table_anchor = 0
|
||||
row_anchor = 0
|
||||
|
||||
def nextTableAnchor():
|
||||
def currentTableAnchor():
|
||||
global table_anchor
|
||||
return f'{table_anchor}'
|
||||
|
||||
def newTableAnchor():
|
||||
global table_anchor
|
||||
table_anchor += 1
|
||||
return str(table_anchor)
|
||||
return currentTableAnchor()
|
||||
|
||||
def currentRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
return f'{table_anchor}.{row_anchor}'
|
||||
|
||||
def nextRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
row_anchor += 1
|
||||
return str(table_anchor) + "." + str(row_anchor)
|
||||
return f'{table_anchor}.{row_anchor + 1}'
|
||||
|
||||
def tr(x):
|
||||
a = nextRowAnchor()
|
||||
def setRowAnchor(anchor_row_part):
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
row_anchor = anchor_row_part
|
||||
return currentRowAnchor()
|
||||
|
||||
def advanceRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
row_anchor += 1
|
||||
return currentRowAnchor()
|
||||
|
||||
|
||||
def tr(x, anchor=None):
|
||||
#return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
return '<tr id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
anchor = anchor if anchor else advanceRowAnchor()
|
||||
return f'<tr id={anchor}>{x}</tr>'
|
||||
|
||||
def td(value, cell_attributes = ''):
|
||||
return '<td {cell_attributes}>{value}</td>'.format(
|
||||
@ -169,19 +192,23 @@ def td(value, cell_attributes = ''):
|
||||
def th(x):
|
||||
return '<th>' + str(x) + '</th>'
|
||||
|
||||
def tableRow(cell_values, cell_attributes = []):
|
||||
return tr(''.join([td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]))
|
||||
def tableRow(cell_values, cell_attributes = [], anchor=None):
|
||||
return tr(
|
||||
''.join([td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]),
|
||||
anchor)
|
||||
|
||||
def tableHeader(r):
|
||||
return tr(''.join([th(f) for f in r]))
|
||||
|
||||
def tableStart(title):
|
||||
anchor = nextTableAnchor();
|
||||
cls = '-'.join(title.lower().split(' ')[:3]);
|
||||
global table_anchor
|
||||
table_anchor = cls
|
||||
anchor = currentTableAnchor()
|
||||
return f"""
|
||||
<h2 id="{anchor}">
|
||||
<a class="cancela" href="#{anchor}">{title}</a>
|
||||
@ -211,20 +238,23 @@ def htmlRows(n):
|
||||
result += tableRow(row)
|
||||
return result
|
||||
|
||||
def printSimpleTable(caption, columns, rows):
|
||||
def addSimpleTable(caption, columns, rows, pos=None):
|
||||
global tables
|
||||
text = ''
|
||||
if not rows:
|
||||
return
|
||||
|
||||
print(tableStart(caption))
|
||||
print(tableHeader(columns))
|
||||
text += tableStart(caption)
|
||||
text += tableHeader(columns)
|
||||
for row in rows:
|
||||
print(tableRow(row))
|
||||
print(tableEnd())
|
||||
text += tableRow(row)
|
||||
text += tableEnd()
|
||||
tables.insert(pos if pos else len(tables), text)
|
||||
|
||||
def print_tested_commits():
|
||||
def add_tested_commits():
|
||||
global report_errors
|
||||
try:
|
||||
printSimpleTable('Tested commits', ['Old', 'New'],
|
||||
addSimpleTable('Tested commits', ['Old', 'New'],
|
||||
[['<pre>{}</pre>'.format(x) for x in
|
||||
[open('left-commit.txt').read(),
|
||||
open('right-commit.txt').read()]]])
|
||||
@ -235,7 +265,8 @@ def print_tested_commits():
|
||||
*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
|
||||
def print_report_errors():
|
||||
def add_report_errors():
|
||||
global tables
|
||||
global report_errors
|
||||
# Add the errors reported by various steps of comparison script
|
||||
try:
|
||||
@ -246,67 +277,100 @@ def print_report_errors():
|
||||
*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
|
||||
if len(report_errors):
|
||||
print(tableStart('Errors while building the report'))
|
||||
print(tableHeader(['Error']))
|
||||
for x in report_errors:
|
||||
print(tableRow([x]))
|
||||
print(tableEnd())
|
||||
if not report_errors:
|
||||
return
|
||||
|
||||
text = tableStart('Errors while building the report')
|
||||
text += tableHeader(['Error'])
|
||||
for x in report_errors:
|
||||
text += tableRow([x])
|
||||
text += tableEnd()
|
||||
# Insert after Tested Commits
|
||||
tables.insert(1, text)
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>']);
|
||||
|
||||
def add_errors_explained():
|
||||
if not errors_explained:
|
||||
return
|
||||
|
||||
text = '<a name="fail1"/>'
|
||||
text += tableStart('Error summary')
|
||||
text += tableHeader(['Description'])
|
||||
for row in errors_explained:
|
||||
text += tableRow(row)
|
||||
text += tableEnd()
|
||||
|
||||
global tables
|
||||
tables.insert(1, text)
|
||||
|
||||
|
||||
if args.report == 'main':
|
||||
print(header_template.format())
|
||||
|
||||
print_tested_commits()
|
||||
add_tested_commits()
|
||||
|
||||
|
||||
run_error_rows = tsvRows('run-errors.tsv')
|
||||
error_tests += len(run_error_rows)
|
||||
printSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
|
||||
addSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
|
||||
if run_error_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>']);
|
||||
|
||||
|
||||
slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
|
||||
error_tests += len(slow_on_client_rows)
|
||||
printSimpleTable('Slow on client',
|
||||
addSimpleTable('Slow on client',
|
||||
['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
|
||||
slow_on_client_rows)
|
||||
if slow_on_client_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>']);
|
||||
|
||||
unmarked_short_rows = tsvRows('report/unmarked-short-queries.tsv')
|
||||
error_tests += len(unmarked_short_rows)
|
||||
printSimpleTable('Short queries not marked as short',
|
||||
addSimpleTable('Short queries not marked as short',
|
||||
['New client time, s', 'Test', '#', 'Query'],
|
||||
unmarked_short_rows)
|
||||
if unmarked_short_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have short duration but are not explicitly marked as "short"</a>']);
|
||||
|
||||
def print_partial():
|
||||
def add_partial():
|
||||
rows = tsvRows('report/partial-queries-report.tsv')
|
||||
if not rows:
|
||||
return
|
||||
global unstable_partial_queries, slow_average_tests
|
||||
print(tableStart('Partial queries'))
|
||||
|
||||
global unstable_partial_queries, slow_average_tests, tables
|
||||
text = tableStart('Partial queries')
|
||||
columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query']
|
||||
print(tableHeader(columns))
|
||||
text += tableHeader(columns)
|
||||
attrs = ['' for c in columns]
|
||||
for row in rows:
|
||||
anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
|
||||
if float(row[1]) > 0.10:
|
||||
attrs[1] = f'style="background: {color_bad}"'
|
||||
unstable_partial_queries += 1
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
|
||||
else:
|
||||
attrs[1] = ''
|
||||
if float(row[0]) > allowed_single_run_time:
|
||||
attrs[0] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run} seconds"</a>'])
|
||||
slow_average_tests += 1
|
||||
else:
|
||||
attrs[0] = ''
|
||||
print(tableRow(row, attrs))
|
||||
print(tableEnd())
|
||||
text += tableRow(row, attrs, anchor)
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
|
||||
print_partial()
|
||||
add_partial()
|
||||
|
||||
def print_changes():
|
||||
def add_changes():
|
||||
rows = tsvRows('report/changed-perf.tsv')
|
||||
if not rows:
|
||||
return
|
||||
|
||||
global faster_queries, slower_queries
|
||||
global faster_queries, slower_queries, tables
|
||||
|
||||
print(tableStart('Changes in performance'))
|
||||
text = tableStart('Changes in performance')
|
||||
columns = [
|
||||
'Old, s', # 0
|
||||
'New, s', # 1
|
||||
@ -319,11 +383,12 @@ if args.report == 'main':
|
||||
'Query', # 8
|
||||
]
|
||||
|
||||
print(tableHeader(columns))
|
||||
text += tableHeader(columns)
|
||||
|
||||
attrs = ['' for c in columns]
|
||||
attrs[5] = None
|
||||
for row in rows:
|
||||
anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
|
||||
if int(row[5]):
|
||||
if float(row[3]) < 0.:
|
||||
faster_queries += 1
|
||||
@ -331,18 +396,19 @@ if args.report == 'main':
|
||||
else:
|
||||
slower_queries += 1
|
||||
attrs[2] = attrs[3] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
|
||||
else:
|
||||
attrs[2] = attrs[3] = ''
|
||||
|
||||
print(tableRow(row, attrs))
|
||||
text += tableRow(row, attrs, anchor)
|
||||
|
||||
print(tableEnd())
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
|
||||
print_changes()
|
||||
add_changes()
|
||||
|
||||
def print_unstable_queries():
|
||||
global unstable_queries
|
||||
global very_unstable_queries
|
||||
def add_unstable_queries():
|
||||
global unstable_queries, very_unstable_queries, tables
|
||||
|
||||
unstable_rows = tsvRows('report/unstable-queries.tsv')
|
||||
if not unstable_rows:
|
||||
@ -361,33 +427,35 @@ if args.report == 'main':
|
||||
'Query' #7
|
||||
]
|
||||
|
||||
print(tableStart('Unstable queries'))
|
||||
print(tableHeader(columns))
|
||||
text = tableStart('Unstable queries')
|
||||
text += tableHeader(columns)
|
||||
|
||||
attrs = ['' for c in columns]
|
||||
attrs[4] = None
|
||||
for r in unstable_rows:
|
||||
anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
|
||||
if int(r[4]):
|
||||
very_unstable_queries += 1
|
||||
attrs[3] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
attrs[3] = ''
|
||||
|
||||
print(tableRow(r, attrs))
|
||||
text += tableRow(r, attrs, anchor)
|
||||
|
||||
print(tableEnd())
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
|
||||
print_unstable_queries()
|
||||
add_unstable_queries()
|
||||
|
||||
skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
|
||||
printSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)
|
||||
addSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)
|
||||
|
||||
printSimpleTable('Test performance changes',
|
||||
addSimpleTable('Test performance changes',
|
||||
['Test', 'Queries', 'Unstable', 'Changed perf', 'Total not OK', 'Avg relative time diff'],
|
||||
tsvRows('report/test-perf-changes.tsv'))
|
||||
|
||||
def print_test_times():
|
||||
global slow_average_tests
|
||||
def add_test_times():
|
||||
global slow_average_tests, tables
|
||||
rows = tsvRows('report/test-times.tsv')
|
||||
if not rows:
|
||||
return
|
||||
@ -403,8 +471,8 @@ if args.report == 'main':
|
||||
'Shortest query<br>(sum for all runs), s', #7
|
||||
]
|
||||
|
||||
print(tableStart('Test times'))
|
||||
print(tableHeader(columns))
|
||||
text = tableStart('Test times')
|
||||
text += tableHeader(columns)
|
||||
|
||||
nominal_runs = 13 # FIXME pass this as an argument
|
||||
total_runs = (nominal_runs + 1) * 2 # one prewarm run, two servers
|
||||
@ -414,22 +482,25 @@ if args.report == 'main':
|
||||
# FIXME should be 15s max -- investigate parallel_insert
|
||||
slow_average_tests += 1
|
||||
attrs[6] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
|
||||
else:
|
||||
attrs[6] = ''
|
||||
|
||||
if float(r[5]) > allowed_single_run_time * total_runs:
|
||||
slow_average_tests += 1
|
||||
attrs[5] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
|
||||
else:
|
||||
attrs[5] = ''
|
||||
|
||||
print(tableRow(r, attrs))
|
||||
text += tableRow(r, attrs)
|
||||
|
||||
print(tableEnd())
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
|
||||
print_test_times()
|
||||
add_test_times()
|
||||
|
||||
def print_benchmark_results():
|
||||
def add_benchmark_results():
|
||||
if not os.path.isfile('benchmark/website-left.json'):
|
||||
return
|
||||
|
||||
@ -479,26 +550,33 @@ if args.report == 'main':
|
||||
|
||||
all_rows.append([row, attrs])
|
||||
|
||||
print(tableStart('Concurrent benchmarks'))
|
||||
print(tableHeader(header))
|
||||
text = tableStart('Concurrent benchmarks')
|
||||
text += tableHeader(header)
|
||||
for row, attrs in all_rows:
|
||||
print(tableRow(row, attrs))
|
||||
print(tableEnd())
|
||||
text += tableRow(row, attrs)
|
||||
text += tableEnd()
|
||||
|
||||
global tables
|
||||
tables.append(text)
|
||||
|
||||
try:
|
||||
print_benchmark_results()
|
||||
add_benchmark_results()
|
||||
except:
|
||||
report_errors.append(
|
||||
traceback.format_exception_only(
|
||||
*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
|
||||
printSimpleTable('Metric changes',
|
||||
addSimpleTable('Metric changes',
|
||||
['Metric', 'Old median value', 'New median value',
|
||||
'Relative difference', 'Times difference'],
|
||||
tsvRows('metrics/changes.tsv'))
|
||||
|
||||
print_report_errors()
|
||||
add_report_errors()
|
||||
add_errors_explained()
|
||||
|
||||
for t in tables:
|
||||
print(t)
|
||||
|
||||
print("""
|
||||
<p class="links">
|
||||
@ -559,9 +637,9 @@ elif args.report == 'all-queries':
|
||||
|
||||
print(header_template.format())
|
||||
|
||||
print_tested_commits()
|
||||
add_tested_commits()
|
||||
|
||||
def print_all_queries():
|
||||
def add_all_queries():
|
||||
rows = tsvRows('report/all-queries.tsv')
|
||||
if not rows:
|
||||
return
|
||||
@ -579,13 +657,14 @@ elif args.report == 'all-queries':
|
||||
'Query', #9
|
||||
]
|
||||
|
||||
print(tableStart('All query times'))
|
||||
print(tableHeader(columns))
|
||||
text = tableStart('All query times')
|
||||
text += tableHeader(columns)
|
||||
|
||||
attrs = ['' for c in columns]
|
||||
attrs[0] = None
|
||||
attrs[1] = None
|
||||
for r in rows:
|
||||
anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
|
||||
if int(r[1]):
|
||||
attrs[6] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
@ -606,13 +685,15 @@ elif args.report == 'all-queries':
|
||||
attrs[2] = ''
|
||||
attrs[3] = ''
|
||||
|
||||
print(tableRow(r, attrs))
|
||||
text += tableRow(r, attrs, anchor)
|
||||
|
||||
print(tableEnd())
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
|
||||
print_all_queries()
|
||||
|
||||
print_report_errors()
|
||||
add_all_queries()
|
||||
add_report_errors()
|
||||
for t in tables:
|
||||
print(t)
|
||||
|
||||
print("""
|
||||
<p class="links">
|
||||
|
@ -166,7 +166,7 @@ For more information, see the section [“Command-line client”](../interfaces/
|
||||
|
||||
Example:
|
||||
|
||||
``` bash
|
||||
```
|
||||
$ ./clickhouse-client
|
||||
ClickHouse client version 0.0.18749.
|
||||
Connecting to localhost:9000.
|
||||
|
@ -48,13 +48,13 @@ The following settings are also enforced:
|
||||
HTTPS endpoint example with `curl`:
|
||||
|
||||
``` bash
|
||||
curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets"
|
||||
curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse\!';&user=playground&password=clickhouse&database=datasets"
|
||||
```
|
||||
|
||||
TCP endpoint example with [CLI](../interfaces/cli.md):
|
||||
|
||||
``` bash
|
||||
clickhouse client --secure -h play-api.clickhouse.tech --port 9440 -u playground --password clickhouse -q "SELECT 'Play ClickHouse!'"
|
||||
clickhouse client --secure -h play-api.clickhouse.tech --port 9440 -u playground --password clickhouse -q "SELECT 'Play ClickHouse\!'"
|
||||
```
|
||||
|
||||
## Implementation Details {#implementation-details}
|
||||
|
8
docs/en/interfaces/third-party/gui.md
vendored
8
docs/en/interfaces/third-party/gui.md
vendored
@ -65,6 +65,14 @@ Features:
|
||||
- Database explorer.
|
||||
- Visualization tools, that allow you to represent data in different forms.
|
||||
|
||||
### Grafana {#grafana}
|
||||
|
||||
[Grafana](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource) is a platform for monitoring and visualization.
|
||||
|
||||
"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. Trusted and loved by the community" — grafana.com.
|
||||
|
||||
ClickHouse datasource plugin provides a support for ClickHouse as a backend database.
|
||||
|
||||
### DBeaver {#dbeaver}
|
||||
|
||||
[DBeaver](https://dbeaver.io/) - universal desktop database client with ClickHouse support.
|
||||
|
@ -1,4 +1,4 @@
|
||||
## system.asynchronous\_metric\_log {#system-tables-async-log}
|
||||
## system.asynchronous_metric_log {#system-tables-async-log}
|
||||
|
||||
Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. This feature is enabled by default.
|
||||
|
||||
@ -32,5 +32,7 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
|
||||
|
||||
**See Also**
|
||||
|
||||
- [system.asynchronous\_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background.
|
||||
- [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background.
|
||||
- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metric_log) <!--hide-->
|
||||
|
@ -34,3 +34,5 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
|
||||
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
|
||||
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metrics) <!--hide-->
|
@ -22,3 +22,5 @@ Please note that `errors_count` is updated once per query to the cluster, but `e
|
||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||
- [distributed\_replica\_error\_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
|
||||
- [distributed\_replica\_error\_half\_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters) <!--hide-->
|
||||
|
@ -20,3 +20,5 @@ The `system.columns` table contains the following columns (the column type is sh
|
||||
- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
|
||||
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
|
||||
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns) <!--hide-->
|
||||
|
@ -38,3 +38,4 @@ SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova'
|
||||
│ Olga Khvostikova │
|
||||
└──────────────────┘
|
||||
```
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/contributors) <!--hide-->
|
||||
|
@ -1,4 +1,5 @@
|
||||
#system.current_roles {#system_tables-current_roles}
|
||||
# system.current_roles {#system_tables-current_roles}
|
||||
|
||||
Contains active roles of a current user. `SET ROLE` changes the contents of this table.
|
||||
|
||||
Columns:
|
||||
@ -7,4 +8,4 @@ Columns:
|
||||
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
|
||||
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.data\_type\_families {#system_tables-data_type_families}
|
||||
# system.data_type_families {#system_tables-data_type_families}
|
||||
|
||||
Contains information about supported [data types](../../sql-reference/data-types/).
|
||||
|
||||
@ -32,3 +32,5 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String'
|
||||
**See Also**
|
||||
|
||||
- [Syntax](../../sql-reference/syntax.md) — Information about supported syntax.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/data_type_families) <!--hide-->
|
||||
|
@ -5,3 +5,5 @@ This table contains a single String column called ‘name’ – the name of a d
|
||||
Each database that the server knows about has a corresponding entry in the table.
|
||||
|
||||
This system table is used for implementing the `SHOW DATABASES` query.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/databases) <!--hide-->
|
@ -1,4 +1,4 @@
|
||||
# system.detached\_parts {#system_tables-detached_parts}
|
||||
# system.detached_parts {#system_tables-detached_parts}
|
||||
|
||||
Contains information about detached parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached.
|
||||
|
||||
@ -7,3 +7,5 @@ For user-detached parts, the reason is empty. Such parts can be attached with [A
|
||||
For the description of other columns, see [system.parts](../../operations/system-tables/parts.md#system_tables-parts).
|
||||
|
||||
If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/partition.md#alter_drop-detached).
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/detached_parts) <!--hide-->
|
||||
|
@ -59,3 +59,5 @@ SELECT * FROM system.dictionaries
|
||||
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
|
||||
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->
|
@ -10,17 +10,4 @@ Columns:
|
||||
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
|
||||
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
|
||||
|
||||
## system.storage\_policies {#system_tables-storage_policies}
|
||||
|
||||
Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
|
||||
|
||||
Columns:
|
||||
|
||||
- `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy.
|
||||
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
|
||||
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
|
||||
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
|
||||
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
|
||||
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order.
|
||||
|
||||
If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table.
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/disks) <!--hide-->
|
||||
|
@ -1,4 +1,5 @@
|
||||
#system.enabled_roles {#system_tables-enabled_roles}
|
||||
# system.enabled_roles {#system_tables-enabled_roles}
|
||||
|
||||
Contains all active roles at the moment, including current role of the current user and granted roles for current role.
|
||||
|
||||
Columns:
|
||||
|
@ -30,3 +30,5 @@ SELECT * FROM system.events LIMIT 5
|
||||
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
|
||||
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
||||
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/events) <!--hide-->
|
||||
|
@ -6,3 +6,5 @@ Columns:
|
||||
|
||||
- `name`(`String`) – The name of the function.
|
||||
- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/functions) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.graphite\_retentions {#system-graphite-retentions}
|
||||
# system.graphite_retentions {#system-graphite-retentions}
|
||||
|
||||
Contains information about parameters [graphite\_rollup](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite) which are used in tables with [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md) engines.
|
||||
|
||||
@ -13,3 +13,5 @@ Columns:
|
||||
- `is_default` (UInt8) - Whether the pattern is the default.
|
||||
- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
|
||||
- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/graphite_retentions) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.merge\_tree\_settings {#system-merge_tree_settings}
|
||||
# system.merge_tree_settings {#system-merge_tree_settings}
|
||||
|
||||
Contains information about settings for `MergeTree` tables.
|
||||
|
||||
@ -9,3 +9,5 @@ Columns:
|
||||
- `description` (String) — Setting description.
|
||||
- `type` (String) — Setting type (implementation specific string value).
|
||||
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merge_tree_settings) <!--hide-->
|
||||
|
@ -17,3 +17,5 @@ Columns:
|
||||
- `rows_read` (UInt64) — Number of rows read.
|
||||
- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
|
||||
- `rows_written` (UInt64) — Number of rows written.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merges) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.metric\_log {#system_tables-metric_log}
|
||||
# system.metric_log {#system_tables-metric_log}
|
||||
|
||||
Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
|
||||
To turn on metrics history collection on `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with following content:
|
||||
@ -53,3 +53,5 @@ CurrentMetric_ReplicatedChecks: 0
|
||||
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
|
||||
- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
|
||||
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metric_log) <!--hide-->
|
||||
|
@ -37,3 +37,5 @@ SELECT * FROM system.metrics LIMIT 10
|
||||
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
|
||||
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
|
||||
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metrics) <!--hide-->
|
||||
|
@ -43,4 +43,6 @@ If there were problems with mutating some data parts, the following columns cont
|
||||
|
||||
- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
|
||||
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
|
||||
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
|
||||
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/mutations) <!--hide-->
|
||||
|
@ -5,3 +5,5 @@ This table contains a single UInt64 column named `number` that contains almost a
|
||||
You can use this table for tests, or if you need to do a brute force search.
|
||||
|
||||
Reads from this table are not parallelized.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers) <!--hide-->
|
||||
|
@ -1,5 +1,7 @@
|
||||
# system.numbers\_mt {#system-numbers-mt}
|
||||
# system.numbers_mt {#system-numbers-mt}
|
||||
|
||||
The same as [system.numbers](../../operations/system-tables/numbers.md) but reads are parallelized. The numbers can be returned in any order.
|
||||
|
||||
Used for tests.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers_mt) <!--hide-->
|
||||
|
@ -5,3 +5,5 @@ This table contains a single row with a single `dummy` UInt8 column containing t
|
||||
This table is used if a `SELECT` query doesn’t specify the `FROM` clause.
|
||||
|
||||
This is similar to the `DUAL` table found in other DBMSs.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/one) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.part\_log {#system_tables-part-log}
|
||||
# system.part_log {#system_tables-part-log}
|
||||
|
||||
The `system.part_log` table is created only if the [part\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-part-log) server setting is specified.
|
||||
|
||||
@ -30,3 +30,5 @@ The `system.part_log` table contains the following columns:
|
||||
- `exception` (String) — Text message of the occurred error.
|
||||
|
||||
The `system.part_log` table is created after the first inserting data to the `MergeTree` table.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/part_log) <!--hide-->
|
||||
|
@ -153,4 +153,6 @@ move_ttl_info.max: []
|
||||
**See Also**
|
||||
|
||||
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
|
||||
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
|
||||
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts) <!--hide-->
|
||||
|
@ -13,3 +13,5 @@ Columns:
|
||||
- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
|
||||
- `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert.
|
||||
- `query_id` (String) – Query ID, if defined.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.query\_log {#system_tables-query_log}
|
||||
# system.query_log {#system_tables-query_log}
|
||||
|
||||
Contains information about executed queries, for example, start time, duration of processing, error messages.
|
||||
|
||||
@ -136,3 +136,6 @@ Settings.Values: ['0','random','1','10000000000']
|
||||
**See Also**
|
||||
|
||||
- [system.query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_log) <!--hide-->
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.query\_thread\_log {#system_tables-query_thread_log}
|
||||
# system.query_thread_log {#system_tables-query_thread_log}
|
||||
|
||||
Contains information about threads which execute queries, for example, thread name, thread start time, duration of query processing.
|
||||
|
||||
@ -111,3 +111,5 @@ ProfileEvents.Values: [1,97,81,5,81]
|
||||
**See Also**
|
||||
|
||||
- [system.query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_thread_log) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.quota\_limits {#system_tables-quota_limits}
|
||||
# system.quota_limits {#system_tables-quota_limits}
|
||||
|
||||
Contains information about maximums for all intervals of all quotas. Any number of rows or zero can correspond to one quota.
|
||||
|
||||
@ -15,3 +15,5 @@ Columns:
|
||||
- `max_read_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of rows read from all tables and table functions participated in queries.
|
||||
- `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of bytes read from all tables and table functions participated in queries.
|
||||
- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of the query execution time, in seconds.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quota_limits) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.quota\_usage {#system_tables-quota_usage}
|
||||
# system.quota_usage {#system_tables-quota_usage}
|
||||
|
||||
Quota usage by the current user: how much is used and how much is left.
|
||||
|
||||
@ -22,3 +22,5 @@ Columns:
|
||||
- `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum of bytes read from all tables and table functions.
|
||||
- `execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — The total query execution time, in seconds (wall time).
|
||||
- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of query execution time.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quota_usage) <!--hide-->
|
||||
|
@ -19,3 +19,6 @@ Columns:
|
||||
- `1` — The quota applies to all users except those listed in `apply_to_except`.
|
||||
- `apply_to_list` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — List of user names/[roles](../../operations/access-rights.md#role-management) that the quota should be applied to.
|
||||
- `apply_to_except` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — List of user names/roles that the quota should not apply to.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas) <!--hide-->
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.quotas\_usage {#system_tables-quotas_usage}
|
||||
# system.quotas_usage {#system_tables-quotas_usage}
|
||||
|
||||
Quota usage by all users.
|
||||
|
||||
@ -23,3 +23,5 @@ Columns:
|
||||
- `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum of bytes read from all tables and table functions.
|
||||
- `execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — The total query execution time, in seconds (wall time).
|
||||
- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of query execution time.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas_usage) <!--hide-->
|
||||
|
@ -119,3 +119,6 @@ WHERE
|
||||
```
|
||||
|
||||
If this query doesn’t return anything, it means that everything is fine.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) <!--hide-->
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
#system.role_grants {#system_tables-role_grants}
|
||||
|
||||
Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`.
|
||||
|
||||
Columns:
|
||||
|
@ -1,4 +1,5 @@
|
||||
#system.roles {#system_tables-roles}
|
||||
# system.roles {#system_tables-roles}
|
||||
|
||||
Contains information about configured [roles](../../operations/access-rights.md#role-management).
|
||||
|
||||
Columns:
|
||||
@ -7,4 +8,4 @@ Columns:
|
||||
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Role ID.
|
||||
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of roles. Configured in the `access_control_path` parameter.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
|
||||
|
@ -48,3 +48,5 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing'
|
||||
- [Settings](../../operations/settings/index.md#session-settings-intro)
|
||||
- [Permissions for Queries](../../operations/settings/permissions-for-queries.md#settings_readonly)
|
||||
- [Constraints on Settings](../../operations/settings/constraints-on-settings.md)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.storage\_policies {#system_tables-storage_policies}
|
||||
# system.storage_policies {#system_tables-storage_policies}
|
||||
|
||||
Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
|
||||
|
||||
@ -12,3 +12,5 @@ Columns:
|
||||
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order.
|
||||
|
||||
If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/storage_policies) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.table\_engines {#system-table-engines}
|
||||
# system.table_engines {#system-table-engines}
|
||||
|
||||
Contains description of table engines supported by server and their feature support information.
|
||||
|
||||
@ -33,3 +33,5 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
|
||||
- MergeTree family [query clauses](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses)
|
||||
- Kafka [settings](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
|
||||
- Join [settings](../../engines/table-engines/special/join.md#join-limitations-and-settings)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/table_engines) <!--hide-->
|
||||
|
@ -51,3 +51,5 @@ This table contains the following columns (the column type is shown in brackets)
|
||||
- `lifetime_bytes` (Nullable(UInt64)) - Total number of bytes INSERTed since server start (only for `Buffer` tables).
|
||||
|
||||
The `system.tables` table is used in `SHOW TABLES` query implementation.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# system.text\_log {#system-tables-text-log}
|
||||
# system.text_log {#system-tables-text-log}
|
||||
|
||||
Contains logging entries. Logging level which goes to this table can be limited with `text_log.level` server setting.
|
||||
|
||||
@ -24,3 +24,5 @@ Columns:
|
||||
- `revision` (UInt32) — ClickHouse revision.
|
||||
- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
|
||||
- `source_line` (UInt64) — Source line from which the logging was done.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/text_log) <!--hide-->
|
@ -1,4 +1,4 @@
|
||||
# system.trace\_log {#system_tables-trace_log}
|
||||
# system.trace_log {#system_tables-trace_log}
|
||||
|
||||
Contains stack traces collected by the sampling query profiler.
|
||||
|
||||
@ -46,3 +46,5 @@ thread_number: 48
|
||||
query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
|
||||
trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/trace_log) <!--hide-->
|
@ -68,3 +68,4 @@ numChildren: 7
|
||||
pzxid: 987021252247
|
||||
path: /clickhouse/tables/01-08/visits/replicas
|
||||
```
|
||||
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/zookeeper) <!--hide-->
|
||||
|
@ -28,7 +28,7 @@ ClickHouse-specific aggregate functions:
|
||||
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
|
||||
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
|
||||
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
|
||||
- [topK](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
|
||||
- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
|
||||
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
|
||||
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
|
||||
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
|
||||
|
@ -277,8 +277,4 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
|
||||
|
||||
Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of truth and add tasks to Zookeeper queue if needed
|
||||
|
||||
``` sql
|
||||
SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/query_language/system/) <!--hide-->
|
||||
|
@ -60,7 +60,7 @@ Constructions with `{}` are similar to the [remote table function](../../sql-ref
|
||||
- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
|
||||
- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
|
||||
|
||||
1. Query the amount of rows in these files:
|
||||
2. Query the amount of rows in these files:
|
||||
|
||||
<!-- -->
|
||||
|
||||
@ -69,7 +69,7 @@ SELECT count(*)
|
||||
FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
|
||||
```
|
||||
|
||||
1. Query the amount of rows in all files of these two directories:
|
||||
3. Query the amount of rows in all files of these two directories:
|
||||
|
||||
<!-- -->
|
||||
|
||||
|
@ -3,4 +3,16 @@ toc_folder_title: Integrations
|
||||
toc_priority: 30
|
||||
---
|
||||
|
||||
# Движки таблиц для интеграции {#table-engines-for-integrations}
|
||||
|
||||
Для интеграции с внешними системами ClickHouse предоставляет различные средства, включая движки таблиц. Конфигурирование интеграционных движков осуществляется с помощью запросов `CREATE TABLE` или `ALTER TABLE`, как и для других табличных движков. С точки зрения пользователя, настроенная интеграция выглядит как обычная таблица, но запросы к ней передаются через прокси во внешнюю систему. Этот прозрачный запрос является одним из ключевых преимуществ этого подхода по сравнению с альтернативными методами интеграции, такими как внешние словари или табличные функции, которые требуют использования пользовательских методов запроса при каждом использовании.
|
||||
|
||||
Список поддерживаемых интеграций:
|
||||
|
||||
- [ODBC](../../../engines/table-engines/integrations/odbc.md)
|
||||
- [JDBC](../../../engines/table-engines/integrations/jdbc.md)
|
||||
- [MySQL](../../../engines/table-engines/integrations/mysql.md)
|
||||
- [HDFS](../../../engines/table-engines/integrations/hdfs.md)
|
||||
- [Kafka](../../../engines/table-engines/integrations/kafka.md)
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/) <!--hide-->
|
||||
|
@ -1,6 +1,6 @@
|
||||
# ReplacingMergeTree {#replacingmergetree}
|
||||
|
||||
Движок отличается от [MergeTree](mergetree.md#table_engines-mergetree) тем, что выполняет удаление дублирующихся записей с одинаковым значением первичного ключа (точнее, с одинаковым значением [ключа сортировки](mergetree.md)).
|
||||
Движок отличается от [MergeTree](mergetree.md#table_engines-mergetree) тем, что выполняет удаление дублирующихся записей с одинаковым значением [ключа сортировки](mergetree.md).
|
||||
|
||||
Дедупликация данных производится лишь во время слияний. Слияние происходят в фоне в неизвестный момент времени, на который вы не можете ориентироваться. Некоторая часть данных может остаться необработанной. Хотя вы можете вызвать внеочередное слияние с помощью запроса `OPTIMIZE`, на это не стоит рассчитывать, так как запрос `OPTIMIZE` приводит к чтению и записи большого объёма данных.
|
||||
|
||||
@ -27,7 +27,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
|
||||
- `ver` — столбец с версией, тип `UInt*`, `Date` или `DateTime`. Необязательный параметр.
|
||||
|
||||
При слиянии, из всех строк с одинаковым значением первичного ключа `ReplacingMergeTree` оставляет только одну:
|
||||
При слиянии, из всех строк с одинаковым значением ключа сортировки `ReplacingMergeTree` оставляет только одну:
|
||||
|
||||
- Последнюю в выборке, если `ver` не задан.
|
||||
- С максимальной версией, если `ver` задан.
|
||||
@ -40,7 +40,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
|
||||
<summary>Устаревший способ создания таблицы</summary>
|
||||
|
||||
!!! attention "Attention"
|
||||
!!! attention "Внимание"
|
||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
||||
|
||||
``` sql
|
||||
|
@ -3,4 +3,14 @@ toc_folder_title: Special
|
||||
toc_priority: 31
|
||||
---
|
||||
|
||||
# Специальные движки таблиц {#special-table-engines}
|
||||
|
||||
Существует три основные категории движков таблиц:
|
||||
|
||||
- [Семейство MergeTree](../../../engines/table-engines/mergetree-family/index.md) для основного использования.
|
||||
- [Семейство Log](../../../engines/table-engines/log-family/index.md) для небольших временных данных.
|
||||
- [Движки таблиц для интеграции](../../../engines/table-engines/integrations/index.md).
|
||||
|
||||
Остальные движки таблиц уникальны по своему назначению и еще не сгруппированы в семейства, поэтому они помещены в эту специальную категорию.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/special/) <!--hide-->
|
||||
|
@ -1,5 +1,5 @@
|
||||
# MaterializedView {#materializedview}
|
||||
|
||||
Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок.
|
||||
Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/) <!--hide-->
|
||||
|
@ -99,7 +99,7 @@ As in most databases management systems, ClickHouse logically groups tables into
|
||||
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
|
||||
```
|
||||
|
||||
Syntax for creating tables is way more complicated compared to databases (see [reference](../sql-reference/statements/create.md). In general `CREATE TABLE` statement has to specify three key things:
|
||||
Syntax for creating tables is way more complicated compared to databases (see [reference](../sql-reference/statements/create/table.md)). In general `CREATE TABLE` statement has to specify three key things:
|
||||
|
||||
1. Name of table to create.
|
||||
2. Table schema, i.e. list of columns and their [data types](../sql-reference/data-types/index.md).
|
||||
|
@ -7,7 +7,7 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT
|
||||
| Формат | INSERT | SELECT |
|
||||
|-----------------------------------------------------------------|--------|--------|
|
||||
| [TabSeparated](#tabseparated) | ✔ | ✔ |
|
||||
| [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ |
|
||||
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [Template](#format-template) | ✔ | ✔ |
|
||||
@ -132,7 +132,7 @@ SELECT * FROM nestedt FORMAT TSV
|
||||
## TabSeparatedRaw {#tabseparatedraw}
|
||||
|
||||
Отличается от формата `TabSeparated` тем, что строки выводятся без экранирования.
|
||||
Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу).
|
||||
Используя этот формат, следите, чтобы в полях не было символов табуляции или разрыва строки.
|
||||
|
||||
Этот формат также доступен под именем `TSVRaw`.
|
||||
|
||||
@ -940,7 +940,7 @@ message MessageType {
|
||||
}
|
||||
```
|
||||
|
||||
не применяются; вместо них используются определенные в таблице [значения по умолчанию](../sql-reference/statements/create.md#create-default-values).
|
||||
не применяются; вместо них используются определенные в таблице [значения по умолчанию](../sql-reference/statements/create/table.md#create-default-values).
|
||||
|
||||
ClickHouse пишет и читает сообщения `Protocol Buffers` в формате `length-delimited`. Это означает, что перед каждым сообщением пишется его длина
|
||||
в формате [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). См. также [как читать и записывать сообщения Protocol Buffers в формате length-delimited в различных языках программирования](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).
|
||||
|
12
docs/ru/interfaces/third-party/index.md
vendored
12
docs/ru/interfaces/third-party/index.md
vendored
@ -4,4 +4,16 @@ toc_folder_title: "\u041E\u0442 \u0441\u0442\u043E\u0440\u043E\u043D\u043D\u0438
|
||||
toc_priority: 24
|
||||
---
|
||||
|
||||
# Сторонние интерфейсы {#third-party-interfaces}
|
||||
|
||||
Раздел содержит список сторонних интерфейсов для ClickHouse. Это может быть визуальный интерфейс, интерфейс командной строки, либо API:
|
||||
|
||||
- [Client libraries](../../interfaces/third-party/client-libraries.md)
|
||||
- [Integrations](../../interfaces/third-party/integrations.md)
|
||||
- [GUI](../../interfaces/third-party/gui.md)
|
||||
- [Proxies](../../interfaces/third-party/proxy.md)
|
||||
|
||||
!!! note "Примечание"
|
||||
С ClickHouse работают также универсальные инструменты, поддерживающие общий API, такие как [ODBC](../../interfaces/odbc.md) или [JDBC](../../interfaces/jdbc.md).
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/) <!--hide-->
|
||||
|
@ -55,7 +55,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
Запросы управления:
|
||||
|
||||
- [CREATE USER](../sql-reference/statements/create.md#create-user-statement)
|
||||
- [CREATE USER](../sql-reference/statements/create/user.md#create-user-statement)
|
||||
- [ALTER USER](../sql-reference/statements/alter.md#alter-user-statement)
|
||||
- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
|
||||
- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)
|
||||
@ -82,7 +82,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
Запросы управления:
|
||||
|
||||
- [CREATE ROLE](../sql-reference/statements/create.md#create-role-statement)
|
||||
- [CREATE ROLE](../sql-reference/statements/create/index.md#create-role-statement)
|
||||
- [ALTER ROLE](../sql-reference/statements/alter.md#alter-role-statement)
|
||||
- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
|
||||
- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
|
||||
@ -97,7 +97,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
Запросы управления:
|
||||
|
||||
- [CREATE ROW POLICY](../sql-reference/statements/create.md#create-row-policy-statement)
|
||||
- [CREATE ROW POLICY](../sql-reference/statements/create/index.md#create-row-policy-statement)
|
||||
- [ALTER ROW POLICY](../sql-reference/statements/alter.md#alter-row-policy-statement)
|
||||
- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
|
||||
- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
|
||||
@ -109,7 +109,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
Запросы управления:
|
||||
|
||||
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create.md#create-settings-profile-statement)
|
||||
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create/index.md#create-settings-profile-statement)
|
||||
- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter.md#alter-settings-profile-statement)
|
||||
- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
|
||||
- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
|
||||
@ -123,7 +123,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
Запросы управления:
|
||||
|
||||
- [CREATE QUOTA](../sql-reference/statements/create.md#create-quota-statement)
|
||||
- [CREATE QUOTA](../sql-reference/statements/create/index.md#create-quota-statement)
|
||||
- [ALTER QUOTA](../sql-reference/statements/alter.md#alter-quota-statement)
|
||||
- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
|
||||
- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
|
||||
|
@ -17,7 +17,7 @@ toc_title: intro
|
||||
- [Резервное копирование](backup.md)
|
||||
- [Конфигурационные файлы](configuration-files.md)
|
||||
- [Квоты](quotas.md)
|
||||
- [Системные таблицы](system-tables.md)
|
||||
- [Системные таблицы](system-tables/index.md)
|
||||
- [Конфигурационные параметры сервера](server-configuration-parameters/index.md)
|
||||
- [Тестирование серверов с помощью ClickHouse](performance-test.md)
|
||||
- [Настройки](settings/index.md#settings)
|
||||
|
@ -28,7 +28,7 @@ ClickHouse собирает:
|
||||
- Различные метрики того, как сервер использует вычислительные ресурсы.
|
||||
- Общую статистику обработки запросов.
|
||||
|
||||
Метрики находятся в таблицах [system.metrics](system-tables.md#system_tables-metrics), [system.events](system-tables.md#system_tables-events) и [system.asynchronous\_metrics](system-tables.md#system_tables-asynchronous_metrics).
|
||||
Метрики находятся в таблицах [system.metrics](system-tables/metrics.md#system_tables-metrics), [system.events](system-tables/events.md#system_tables-events) и [system.asynchronous_metrics](system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics).
|
||||
|
||||
Можно настроить экспорт метрик из ClickHouse в [Graphite](https://github.com/graphite-project). Смотрите секцию [graphite](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) конфигурационного файла ClickHouse. Перед настройкой экспорта метрик необходимо настроить Graphite, как указано в [официальном руководстве](https://graphite.readthedocs.io/en/latest/install.html).
|
||||
|
||||
@ -37,3 +37,5 @@ ClickHouse собирает:
|
||||
Также, можно отслеживать доступность сервера через HTTP API. Отправьте `HTTP GET` к ресурсу `/ping`. Если сервер доступен, он отвечает `200 OK`.
|
||||
|
||||
Для мониторинга серверов в кластерной конфигурации необходимо установить параметр [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) и использовать HTTP ресурс `/replicas_status`. Если реплика доступна и не отстаёт от других реплик, то запрос к `/replicas_status` возвращает `200 OK`. Если реплика отстаёт, то запрос возвращает `503 HTTP_SERVICE_UNAVAILABLE`, включая информацию о размере отставания.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/monitoring) <!--hide-->
|
||||
|
@ -11,7 +11,7 @@ To use profiler:
|
||||
|
||||
- Setup the [trace\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) section of the server configuration.
|
||||
|
||||
This section configures the [trace\_log](../../operations/system-tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid.
|
||||
This section configures the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid.
|
||||
|
||||
- Setup the [query\_profiler\_cpu\_time\_period\_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) or [query\_profiler\_real\_time\_period\_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
|
||||
|
||||
|
@ -576,9 +576,9 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat
|
||||
|
||||
- `endpoint` – путь по которому будет осуществляться экспорт данных метрик по HTTP протоколу для сбора с помощью prometheus. Должен начинаться с ‘/’.
|
||||
- `port` – порт по которому будет доступен endpoint для сбора метрик.
|
||||
- `metrics` – флаг для экспорта текущих значений метрик из таблицы [system.metrics](../system-tables.md#system_tables-metrics).
|
||||
- `events` – флаг для экспорта текущих значений метрик из таблицы [system.events](../system-tables.md#system_tables-events).
|
||||
- `asynchronous_metrics` – флаг для экспорта текущих значений значения метрик из таблицы [system.asynchronous\_metrics](../system-tables.md#system_tables-asynchronous_metrics).
|
||||
- `metrics` – флаг для экспорта текущих значений метрик из таблицы [system.metrics](../system-tables/metrics.md#system_tables-metrics).
|
||||
- `events` – флаг для экспорта текущих значений метрик из таблицы [system.events](../system-tables/events.md#system_tables-events).
|
||||
- `asynchronous_metrics` – флаг для экспорта текущих значений метрик из таблицы [system.asynchronous\_metrics](../system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics).
|
||||
|
||||
**Пример**
|
||||
|
||||
|
@ -1227,7 +1227,7 @@ Default value: 1000000000 nanoseconds (once a second).
|
||||
|
||||
See also:
|
||||
|
||||
- System table [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- System table [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}
|
||||
|
||||
@ -1250,9 +1250,9 @@ Default value: 1000000000 nanoseconds.
|
||||
|
||||
See also:
|
||||
|
||||
- System table [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- System table [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## allow\_introspection\_functions {#settings-allow_introspection_functions}
|
||||
## allow_introspection_functions {#settings-allow_introspection_functions}
|
||||
|
||||
Enables or disables [introspection functions](../../sql-reference/functions/introspection.md) for query profiling.
|
||||
|
||||
@ -1266,9 +1266,9 @@ Default value: 0.
|
||||
**See Also**
|
||||
|
||||
- [Sampling Query Profiler](../optimizing-performance/sampling-query-profiler.md)
|
||||
- System table [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- System table [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## background\_pool\_size {#background_pool_size}
|
||||
## background_pool_size {#background_pool_size}
|
||||
|
||||
Задает количество потоков для выполнения фоновых операций в движках таблиц (например, слияния в таблицах c движком [MergeTree](../../engines/table-engines/mergetree-family/index.md)). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. Настройка позволяет управлять загрузкой процессора и диска. Чем меньше пул, тем ниже нагрузка на CPU и диск, при этом фоновые процессы замедляются, что может повлиять на скорость выполнения запроса.
|
||||
|
||||
@ -1452,7 +1452,7 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;
|
||||
|
||||
## min_insert_block_size_rows_for_materialized_views {#min-insert-block-size-rows-for-materialized-views}
|
||||
|
||||
Устанавливает минимальное количество строк в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
Устанавливает минимальное количество строк в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create/view.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
|
||||
Допустимые значения:
|
||||
|
||||
@ -1467,7 +1467,7 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;
|
||||
|
||||
## min_insert_block_size_bytes_for_materialized_views {#min-insert-block-size-bytes-for-materialized-views}
|
||||
|
||||
Устанавливает минимальное количество байтов в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
Устанавливает минимальное количество байтов в блоке, который может быть вставлен в таблицу запросом `INSERT`. Блоки меньшего размера склеиваются в блоки большего размера. Настройка применяется только для блоков, вставляемых в [материализованное представление](../../sql-reference/statements/create/view.md#create-view). Настройка позволяет избежать избыточного потребления памяти.
|
||||
|
||||
Допустимые значения:
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
36
docs/ru/operations/system-tables/asynchronous_metric_log.md
Normal file
36
docs/ru/operations/system-tables/asynchronous_metric_log.md
Normal file
@ -0,0 +1,36 @@
|
||||
## system.asynchronous_metric_log {#system-tables-async-log}
|
||||
|
||||
Содержит исторические значения метрик из таблицы `system.asynchronous_metrics`, которые сохраняются раз в минуту. По умолчанию включена.
|
||||
|
||||
Столбцы:
|
||||
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата события.
|
||||
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время события.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — название метрики.
|
||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.asynchronous_metric_log LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬────value─┐
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty │ 4214 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs │ 0 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained │ 17657856 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped │ 71471104 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident │ 61538304 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata │ 6199264 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated │ 38074336 │
|
||||
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch │ 2 │
|
||||
└────────────┴─────────────────────┴──────────────────────────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Содержит метрики, которые периодически вычисляются в фоновом режиме.
|
||||
- [system.metric_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/asynchronous_metric_log) <!--hide-->
|
39
docs/ru/operations/system-tables/asynchronous_metrics.md
Normal file
39
docs/ru/operations/system-tables/asynchronous_metrics.md
Normal file
@ -0,0 +1,39 @@
|
||||
# system.asynchronous_metrics {#system_tables-asynchronous_metrics}
|
||||
|
||||
Содержит метрики, которые периодически вычисляются в фоновом режиме. Например, объём используемой оперативной памяти.
|
||||
|
||||
Столбцы:
|
||||
|
||||
- `metric` ([String](../../sql-reference/data-types/string.md)) — название метрики.
|
||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.asynchronous_metrics LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─metric──────────────────────────────────┬──────value─┐
|
||||
│ jemalloc.background_thread.run_interval │ 0 │
|
||||
│ jemalloc.background_thread.num_runs │ 0 │
|
||||
│ jemalloc.background_thread.num_threads │ 0 │
|
||||
│ jemalloc.retained │ 422551552 │
|
||||
│ jemalloc.mapped │ 1682989056 │
|
||||
│ jemalloc.resident │ 1656446976 │
|
||||
│ jemalloc.metadata_thp │ 0 │
|
||||
│ jemalloc.metadata │ 10226856 │
|
||||
│ UncompressedCacheCells │ 0 │
|
||||
│ MarkCacheFiles │ 0 │
|
||||
└─────────────────────────────────────────┴────────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Мониторинг](../../operations/monitoring.md) — основы мониторинга в ClickHouse.
|
||||
- [system.metrics](#system_tables-metrics) — таблица с мгновенно вычисляемыми метриками.
|
||||
- [system.events](#system_tables-events) — таблица с количеством произошедших событий.
|
||||
- [system.metric\_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/asynchronous_metrics) <!--hide-->
|
||||
|
16
docs/ru/operations/system-tables/clusters.md
Normal file
16
docs/ru/operations/system-tables/clusters.md
Normal file
@ -0,0 +1,16 @@
|
||||
# system.clusters {#system-clusters}
|
||||
|
||||
Содержит информацию о доступных в конфигурационном файле кластерах и серверах, которые в них входят.
|
||||
|
||||
Столбцы:
|
||||
|
||||
- `cluster` (String) — имя кластера.
|
||||
- `shard_num` (UInt32) — номер шарда в кластере, начиная с 1.
|
||||
- `shard_weight` (UInt32) — относительный вес шарда при записи данных.
|
||||
- `replica_num` (UInt32) — номер реплики в шарде, начиная с 1.
|
||||
- `host_name` (String) — хост, указанный в конфигурации.
|
||||
- `host_address` (String) — IP-адрес хоста, полученный из DNS.
|
||||
- `port` (UInt16) — порт, на который обращаться для соединения с сервером.
|
||||
- `user` (String) — имя пользователя, которого использовать для соединения с сервером.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/clusters) <!--hide-->
|
24
docs/ru/operations/system-tables/columns.md
Normal file
24
docs/ru/operations/system-tables/columns.md
Normal file
@ -0,0 +1,24 @@
|
||||
# system.columns {#system-columns}
|
||||
|
||||
Содержит информацию о столбцах всех таблиц.
|
||||
|
||||
С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table), но для многих таблиц сразу.
|
||||
|
||||
Таблица `system.columns` содержит столбцы (тип столбца указан в скобках):
|
||||
|
||||
- `database` (String) — имя базы данных.
|
||||
- `table` (String) — имя таблицы.
|
||||
- `name` (String) — имя столбца.
|
||||
- `type` (String) — тип столбца.
|
||||
- `default_kind` (String) — тип выражения (`DEFAULT`, `MATERIALIZED`, `ALIAS`) значения по умолчанию, или пустая строка.
|
||||
- `default_expression` (String) — выражение для значения по умолчанию или пустая строка.
|
||||
- `data_compressed_bytes` (UInt64) — размер сжатых данных в байтах.
|
||||
- `data_uncompressed_bytes` (UInt64) — размер распакованных данных в байтах.
|
||||
- `marks_bytes` (UInt64) — размер засечек в байтах.
|
||||
- `comment` (String) — комментарий к столбцу или пустая строка.
|
||||
- `is_in_partition_key` (UInt8) — флаг, показывающий включение столбца в ключ партиционирования.
|
||||
- `is_in_sorting_key` (UInt8) — флаг, показывающий включение столбца в ключ сортировки.
|
||||
- `is_in_primary_key` (UInt8) — флаг, показывающий включение столбца в первичный ключ.
|
||||
- `is_in_sampling_key` (UInt8) — флаг, показывающий включение столбца в ключ выборки.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/columns) <!--hide-->
|
42
docs/ru/operations/system-tables/contributors.md
Normal file
42
docs/ru/operations/system-tables/contributors.md
Normal file
@ -0,0 +1,42 @@
|
||||
# system.contributors {#system-contributors}
|
||||
|
||||
Содержит информацию о контрибьюторах. Контрибьюторы расположены в таблице в случайном порядке. Порядок определяется заново при каждом запросе.
|
||||
|
||||
Столбцы:
|
||||
|
||||
- `name` (String) — Имя контрибьютора (автора коммита) из git log.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.contributors LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name─────────────┐
|
||||
│ Olga Khvostikova │
|
||||
│ Max Vetrov │
|
||||
│ LiuYangkuan │
|
||||
│ svladykin │
|
||||
│ zamulla │
|
||||
│ Šimon Podlipský │
|
||||
│ BayoNet │
|
||||
│ Ilya Khomutov │
|
||||
│ Amy Krishnevsky │
|
||||
│ Loud_Scream │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
Чтобы найти себя в таблице, выполните запрос:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name─────────────┐
|
||||
│ Olga Khvostikova │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/contributors) <!--hide-->
|
11
docs/ru/operations/system-tables/current-roles.md
Normal file
11
docs/ru/operations/system-tables/current-roles.md
Normal file
@ -0,0 +1,11 @@
|
||||
# system.current_roles {#system_tables-current_roles}
|
||||
|
||||
Содержит активные роли текущего пользователя. `SET ROLE` изменяет содержимое этой таблицы.
|
||||
|
||||
Столбцы:
|
||||
|
||||
- `role_name` ([String](../../sql-reference/data-types/string.md)) — Имя роли.
|
||||
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, обладает ли роль `current_role` привилегией `ADMIN OPTION`.
|
||||
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `current_role` ролью по умолчанию.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/current-roles) <!--hide-->
|
36
docs/ru/operations/system-tables/data_type_families.md
Normal file
36
docs/ru/operations/system-tables/data_type_families.md
Normal file
@ -0,0 +1,36 @@
|
||||
# system.data_type_families {#system_tables-data_type_families}
|
||||
|
||||
Содержит информацию о поддерживаемых [типах данных](../../sql-reference/data-types/).
|
||||
|
||||
Столбцы:
|
||||
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — имя типа данных.
|
||||
- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) — свойство, которое показывает, зависит ли имя типа данных в запросе от регистра. Например, допустимы и `Date`, и `date`.
|
||||
- `alias_to` ([String](../../sql-reference/data-types/string.md)) — тип данных, для которого `name` является алиасом.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.data_type_families WHERE alias_to = 'String'
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name───────┬─case_insensitive─┬─alias_to─┐
|
||||
│ LONGBLOB │ 1 │ String │
|
||||
│ LONGTEXT │ 1 │ String │
|
||||
│ TINYTEXT │ 1 │ String │
|
||||
│ TEXT │ 1 │ String │
|
||||
│ VARCHAR │ 1 │ String │
|
||||
│ MEDIUMBLOB │ 1 │ String │
|
||||
│ BLOB │ 1 │ String │
|
||||
│ TINYBLOB │ 1 │ String │
|
||||
│ CHAR │ 1 │ String │
|
||||
│ MEDIUMTEXT │ 1 │ String │
|
||||
└────────────┴──────────────────┴──────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Синтаксис](../../sql-reference/syntax.md) — поддерживаемый SQL синтаксис.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/data_type_families) <!--hide-->
|
7
docs/ru/operations/system-tables/databases.md
Normal file
7
docs/ru/operations/system-tables/databases.md
Normal file
@ -0,0 +1,7 @@
|
||||
# system.databases {#system-databases}
|
||||
|
||||
Таблица содержит один столбец `name` типа `String` — имя базы данных.
|
||||
Для каждой базы данных, о которой знает сервер, будет присутствовать соответствующая запись в таблице.
|
||||
Эта системная таблица используется для реализации запроса `SHOW DATABASES`.
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/databases) <!--hide-->
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user