Merge commit 'ceac649c01b0158090cd271776f3219f5e7ff57c' into ldap-per-user-authentication

* commit 'ceac649c01b0158090cd271776f3219f5e7ff57c': (75 commits)
  [docs] split misc statements (#12403)
  Update 00405_pretty_formats.reference
  Update PrettyCompactBlockOutputFormat.cpp
  Update PrettyBlockOutputFormat.cpp
  Update DataTypeNullable.cpp
  Update 01383_remote_ambiguous_column_shard.sql
  add output_format_pretty_grid_charset setting in docs
  add setting output_format_pretty_grid_charset
  Added a test for #11135
  Update index.md
  RIGHT and FULL JOIN for MergeJoin (#12118)
  Update MergeTreeIndexFullText.cpp
  restart the tests
  [docs] add syntax highlight (#12398)
  query fuzzer
  Fix std::bad_typeid when JSON functions called with argument of wrong type.
  Allow typeid_cast() to cast nullptr to nullptr.
  fix another context-related segfault
  [security docs] actually, only admins can create advisories
  query fuzzer
  ...
This commit is contained in:
Denis Glazachev 2020-07-11 21:32:36 +04:00
commit edb6ef8c09
308 changed files with 6420 additions and 2818 deletions

View File

@ -12,6 +12,20 @@ foreach(policy
endif()
endforeach()
# set default policy
foreach(default_policy_var_name
# make option() honor normal variables for BUILD_SHARED_LIBS:
# - re2
# - snappy
CMAKE_POLICY_DEFAULT_CMP0077
# Google Test from sources uses too old cmake, 2.6.x, and CMP0022 should
# be set, to avoid using deprecated LINK_INTERFACE_LIBRARIES(_<CONFIG>)? over
# INTERFACE_LINK_LIBRARIES.
CMAKE_POLICY_DEFAULT_CMP0022
)
set(${default_policy_var_name} NEW)
endforeach()
project(ClickHouse)
include (cmake/arch.cmake)
@ -378,10 +392,6 @@ include (cmake/find/mysqlclient.cmake)
# When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc.
if (OS_LINUX AND NOT ENABLE_JEMALLOC)
message (WARNING "Non default allocator is disabled. This is not recommended for production Linux builds.")
endif ()
if (USE_OPENCL)
if (OS_DARWIN)
set(OPENCL_LINKER_FLAGS "-framework OpenCL")
@ -397,6 +407,10 @@ endif ()
add_subdirectory (contrib EXCLUDE_FROM_ALL)
if (NOT ENABLE_JEMALLOC)
message (WARNING "Non default allocator is disabled. This is not recommended for production builds.")
endif ()
macro (add_executable target)
# invoke built-in add_executable
# explicitly acquire and interpose malloc symbols by clickhouse_malloc

View File

@ -18,5 +18,4 @@ currently being supported with security updates:
## Reporting a Vulnerability
To report a potential vulnerability in ClickHouse please use the security advisory feature of GitHub:
https://github.com/ClickHouse/ClickHouse/security/advisories
To report a potential vulnerability in ClickHouse please send the details about it to [clickhouse-feedback@yandex-team.com](mailto:clickhouse-feedback@yandex-team.com).

View File

@ -163,7 +163,8 @@ public:
enum Signals : int
{
StdTerminate = -1,
StopThread = -2
StopThread = -2,
SanitizerTrap = -3,
};
explicit SignalListener(BaseDaemon & daemon_)
@ -223,8 +224,12 @@ public:
std::string query_id;
DB::ThreadStatus * thread_ptr{};
DB::readPODBinary(info, in);
DB::readPODBinary(context, in);
if (sig != SanitizerTrap)
{
DB::readPODBinary(info, in);
DB::readPODBinary(context, in);
}
DB::readPODBinary(stack_trace, in);
DB::readBinary(thread_num, in);
DB::readBinary(query_id, in);
@ -279,7 +284,14 @@ private:
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, query_id, strsignal(sig), sig);
}
LOG_FATAL(log, signalToErrorMessage(sig, info, context));
String error_message;
if (sig != SanitizerTrap)
error_message = signalToErrorMessage(sig, info, context);
else
error_message = "Sanitizer trap.";
LOG_FATAL(log, error_message);
if (stack_trace.getSize())
{
@ -305,12 +317,12 @@ private:
String build_id_hex{};
#endif
SentryWriter::onFault(sig, info, context, stack_trace, build_id_hex);
if (sig != SanitizerTrap)
SentryWriter::onFault(sig, error_message, stack_trace, build_id_hex);
/// When everything is done, we will try to send these error messages to client.
if (thread_ptr)
thread_ptr->onFatalError();
}
};
@ -320,35 +332,27 @@ extern "C" void __sanitizer_set_death_callback(void (*)());
static void sanitizerDeathCallback()
{
Poco::Logger * log = &Poco::Logger::get("BaseDaemon");
/// Also need to send data via pipe. Otherwise it may lead to deadlocks or failures in printing diagnostic info.
StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe.
char buf[signal_pipe_buf_size];
DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
if (query_id.size == 0)
{
LOG_FATAL(log, "(version {}{}) (from thread {}) (no query) Sanitizer trap.",
VERSION_STRING, VERSION_OFFICIAL, getThreadId());
}
else
{
LOG_FATAL(log, "(version {}{}) (from thread {}) (query_id: {}) Sanitizer trap.",
VERSION_STRING, VERSION_OFFICIAL, getThreadId(), query_id);
}
const StackTrace stack_trace;
/// Just in case print our own stack trace. In case when llvm-symbolizer does not work.
StackTrace stack_trace;
if (stack_trace.getSize())
{
std::stringstream bare_stacktrace;
bare_stacktrace << "Stack trace:";
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
bare_stacktrace << ' ' << stack_trace.getFramePointers()[i];
StringRef query_id = DB::CurrentThread::getQueryId();
query_id.size = std::min(query_id.size, max_query_id_size);
LOG_FATAL(log, bare_stacktrace.str());
}
int sig = SignalListener::SanitizerTrap;
DB::writeBinary(sig, out);
DB::writePODBinary(stack_trace, out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writeStringBinary(query_id, out);
DB::writePODBinary(DB::current_thread, out);
/// Write symbolized stack trace line by line for better grep-ability.
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
out.next();
/// The time that is usually enough for separate thread to print info into log.
sleepForSeconds(10);
}
#endif

View File

@ -2,23 +2,26 @@
#include <Poco/File.h>
#include <Poco/Util/Application.h>
#include <Poco/Util/LayeredConfiguration.h>
#include <common/defines.h>
#include <common/getFQDNOrHostName.h>
#include <common/logger_useful.h>
#include <Common/StackTrace.h>
#if !defined(ARCADIA_BUILD)
# include "Common/config_version.h"
# include <Common/config.h>
#endif
#if USE_SENTRY
# include <sentry.h> // Y_IGNORE
# include <stdio.h>
# include <filesystem>
#endif
#if USE_SENTRY
namespace
{
@ -76,12 +79,12 @@ void sentry_logger(sentry_level_t level, const char * message, va_list args)
}
}
}
}
#endif
void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
{
#if USE_SENTRY
bool enabled = false;
bool debug = config.getBool("send_crash_reports.debug", false);
auto * logger = &Poco::Logger::get("SentryWriter");
@ -146,28 +149,19 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
{
LOG_INFO(logger, "Sending crash reports is disabled");
}
#else
UNUSED(config);
#endif
}
void SentryWriter::shutdown()
{
#if USE_SENTRY
if (initialized)
{
sentry_shutdown();
}
#endif
}
void SentryWriter::onFault(int sig, const siginfo_t & info, const ucontext_t & context, const StackTrace & stack_trace, const String & build_id_hex)
void SentryWriter::onFault(int sig, const std::string & error_message, const StackTrace & stack_trace, const std::string & build_id_hex)
{
#if USE_SENTRY
auto * logger = &Poco::Logger::get("SentryWriter");
if (initialized)
{
const std::string & error_message = signalToErrorMessage(sig, info, context);
sentry_value_t event = sentry_value_new_message_event(SENTRY_LEVEL_FATAL, "fault", error_message.c_str());
sentry_set_tag("signal", strsignal(sig));
sentry_set_extra("signal_number", sentry_value_new_int32(sig));
@ -240,11 +234,12 @@ void SentryWriter::onFault(int sig, const siginfo_t & info, const ucontext_t & c
{
LOG_INFO(logger, "Not sending crash report");
}
#else
UNUSED(sig);
UNUSED(info);
UNUSED(context);
UNUSED(stack_trace);
UNUSED(build_id_hex);
#endif
}
#else
void SentryWriter::initialize(Poco::Util::LayeredConfiguration &) {}
void SentryWriter::shutdown() {}
void SentryWriter::onFault(int, const std::string &, const StackTrace &, const std::string &) {}
#endif

View File

@ -1,12 +1,12 @@
#pragma once
#include <common/types.h>
#include <Common/StackTrace.h>
#include <Poco/Util/LayeredConfiguration.h>
#include <string>
namespace Poco { namespace Util { class LayeredConfiguration; }}
class StackTrace;
/// \brief Sends crash reports to ClickHouse core developer team via https://sentry.io
///
/// This feature can enabled with "send_crash_reports.enabled" server setting,
@ -14,20 +14,16 @@
///
/// It is possible to send those reports to your own sentry account or account of consulting company you hired
/// by overriding "send_crash_reports.endpoint" setting. "send_crash_reports.debug" setting will allow to do that for
class SentryWriter
namespace SentryWriter
{
public:
SentryWriter() = delete;
static void initialize(Poco::Util::LayeredConfiguration & config);
static void shutdown();
void initialize(Poco::Util::LayeredConfiguration & config);
void shutdown();
/// Not signal safe and can't be called from a signal handler
static void onFault(
void onFault(
int sig,
const siginfo_t & info,
const ucontext_t & context,
const std::string & error_message,
const StackTrace & stack_trace,
const String & build_id_hex
const std::string & build_id_hex
);
};

View File

@ -44,13 +44,8 @@ endif ()
if (USE_INTERNAL_RE2_LIBRARY)
set(RE2_BUILD_TESTING 0 CACHE INTERNAL "")
function(re2_support)
# make option() honor normal variables for BUILD_SHARED_LIBS
set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
add_subdirectory (re2)
add_subdirectory (re2_st)
endfunction()
re2_support()
add_subdirectory (re2)
add_subdirectory (re2_st)
endif ()
if (USE_INTERNAL_DOUBLE_CONVERSION_LIBRARY)
@ -227,19 +222,11 @@ if (USE_INTERNAL_AVRO_LIBRARY)
endif()
if(USE_INTERNAL_GTEST_LIBRARY)
# Wrap into function because of CMAKE_POLICY_DEFAULT_CMP0022
function(googletest_support)
set(GOOGLETEST_VERSION 1.10.0) # master
# Google Test from sources uses too old cmake, 2.6.x, and CMP0022 should
# be set, to avoid using deprecated LINK_INTERFACE_LIBRARIES(_<CONFIG>)? over
# INTERFACE_LINK_LIBRARIES.
set(CMAKE_POLICY_DEFAULT_CMP0022 NEW)
# Google Test from sources
add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
# avoid problems with <regexp.h>
target_compile_definitions (gtest INTERFACE GTEST_HAS_POSIX_RE=0)
endfunction()
googletest_support()
set(GOOGLETEST_VERSION 1.10.0) # master
# Google Test from sources
add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
# avoid problems with <regexp.h>
target_compile_definitions (gtest INTERFACE GTEST_HAS_POSIX_RE=0)
elseif(GTEST_SRC_DIR)
add_subdirectory(${GTEST_SRC_DIR}/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)

View File

@ -20,5 +20,7 @@
#define ARROW_VERSION_PATCH
#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH
/* #undef DOUBLE_CONVERSION_HAS_CASE_INSENSIBILITY */
#define ARROW_SO_VERSION ""
#define ARROW_FULL_SO_VERSION ""
/* #undef GRPCPP_PP_INCLUDE */

View File

@ -31,6 +31,10 @@
"name": "yandex/clickhouse-integration-test",
"dependent": []
},
"docker/test/fuzzer": {
"name": "yandex/clickhouse-fuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
"name": "yandex/clickhouse-performance-comparison",
"dependent": []

View File

@ -0,0 +1,36 @@
# docker build -t yandex/clickhouse-fuzzer .
FROM ubuntu:18.04
ENV LANG=C.UTF-8
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
bash \
ca-certificates \
curl \
gdb \
git \
libc6-dbg \
moreutils \
ncdu \
p7zip-full \
parallel \
psmisc \
rsync \
tree \
tzdata \
vim \
wget \
&& apt-get autoremove --yes \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY * /
CMD cd /workspace \
&& /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer

docker/test/fuzzer/run-fuzzer.sh Executable file
View File

@ -0,0 +1,114 @@
#!/bin/bash
set -eux
set -o pipefail
trap "exit" INT TERM
trap 'kill $(jobs -pr) ||:' EXIT
stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$script_dir"
repo_dir=ch
function clone
{
(
rm -rf ch ||:
mkdir ch
cd ch
git init
git remote add origin https://github.com/ClickHouse/ClickHouse
git fetch --depth=1 origin "$SHA_TO_TEST"
# If not master, try to fetch pull/.../{head,merge}
if [ "$PR_TO_TEST" != "0" ]
then
git fetch --depth=1 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
fi
git checkout "$SHA_TO_TEST"
)
}
function download
{
# wget -O- -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/performance/performance.tgz" \
# | tar --strip-components=1 -zxv
wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-10_debug_none_bundled_unsplitted_disable_False_binary/clickhouse"
chmod +x clickhouse
}
function configure
{
rm -rf db ||:
mkdir db ||:
cp -av "$repo_dir"/programs/server/config* db
cp -av "$repo_dir"/programs/server/user* db
cp -av "$repo_dir"/tests/config db/config.d
}
function watchdog
{
sleep 3600
echo "Fuzzing run has timed out"
killall -9 clickhouse clickhouse-server clickhouse-client
}
function fuzz
{
./clickhouse server --config-file db/config.xml -- --path db 2>&1 | tail -1000000 > server.log &
server_pid=$!
kill -0 $server_pid
while ! ./clickhouse client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
./clickhouse client --query "select 1"
kill -0 $server_pid
echo Server started
fuzzer_exit_code=0
./clickhouse client --query-fuzzer-runs=100 \
< <(for f in $(ls ch/tests/queries/0_stateless/*.sql | sort -R); do cat "$f"; echo ';'; done) \
> >(tail -1000000 > fuzzer.log) \
2>&1 \
|| fuzzer_exit_code=$?
echo "Fuzzer exit code is $fuzzer_exit_code"
kill -9 $server_pid
return $fuzzer_exit_code
}
case "$stage" in
"")
;&
"clone")
time clone
if [ -v FUZZ_LOCAL_SCRIPT ]
then
# just fall through
echo Using the testing script from docker container
:
else
# Run the testing script from the repository
echo Using the testing script from the repository
export stage=download
# Keep the error code
time ch/docker/test/fuzzer/run-fuzzer.sh || exit $?
fi
;&
"download")
time download
;&
"configure")
time configure
;&
"fuzz")
watchdog &
watchdog_pid=$!
fuzzer_exit_code=0
time fuzz || fuzzer_exit_code=$?
kill $watchdog_pid
exit $fuzzer_exit_code
;&
esac

View File

@ -4,15 +4,14 @@ toc_priority: 70
toc_title: Introduction
---
# ClickHouse Commercial Services
# ClickHouse Commercial Services {#clickhouse-commercial-services}
This section is a directory of commercial service providers specializing in ClickHouse. They are independent companies not necessarily affiliated with Yandex.
Service categories:
- [Cloud](cloud.md)
- [Support](support.md)
- [Cloud](../commercial/cloud.md)
- [Support](../commercial/support.md)
!!! note "For service providers"
If you happen to represent one of them, feel free to open a pull request adding your company to the respective section (or even adding a new section if the service doesn't fit into existing categories). The easiest way to open a pull request for a documentation page is by using a "pencil" edit button in the top-right corner. If your service is available in some local market, make sure to mention it in a localized documentation page as well (or at least point it out in a pull-request description).
If you happen to represent one of them, feel free to open a pull request adding your company to the respective section (or even adding a new section if the service doesn’t fit into existing categories). The easiest way to open a pull request for a documentation page is by using a “pencil” edit button in the top-right corner. If your service is available in some local market, make sure to mention it in a localized documentation page as well (or at least point it out in a pull-request description).

View File

@ -5,7 +5,7 @@ toc_title: Architecture Overview
# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
ClickHouse is a true column-oriented DBMS. Data is stored by columns and processed by arrays (vectors or chunks of columns) during query execution. Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called "vectorized query execution" and it helps lower the cost of actual data processing.
ClickHouse is a true column-oriented DBMS. Data is stored by columns and processed by arrays (vectors or chunks of columns) during query execution. Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution” and it helps lower the cost of actual data processing.
> This idea is nothing new. It dates back to the `APL` (A programming language, 1957) and its descendants: `A +` (APL dialect), `J` (1990), `K` (1993), and `Q` (programming language from Kx Systems, 2003). Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `VectorWise` system (also known as Actian Vector Analytic Database by Actian Corporation).
@ -21,11 +21,11 @@ Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are
Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator []` method to get the n-th value as a `Field`, and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.
`Field` doesn't have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
`Field` doesn’t have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
## Leaky Abstractions {#leaky-abstractions}
`IColumn` has methods for common relational transformations of data, but they don’t meet all needs. For example, `ColumnUInt64` doesn't have a method to calculate the sum of two columns, and `ColumnString` doesn't have a method to run a substring search. These countless routines are implemented outside of `IColumn`.
`IColumn` has methods for common relational transformations of data, but they don’t meet all needs. For example, `ColumnUInt64` doesn’t have a method to calculate the sum of two columns, and `ColumnString` doesn’t have a method to run a substring search. These countless routines are implemented outside of `IColumn`.
Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of the inner memory layout of data in a specific `IColumn` implementation. This is done by casting functions to a specific `IColumn` type and dealing with the internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, then a separate routine reads or fills that array directly. We have “leaky abstractions” to allow efficient specializations of various routines.
@ -35,7 +35,7 @@ Various functions on columns can be implemented in a generic, non-efficient way
`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`.
`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn't store anything at all (except virtual pointer `vptr`) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).
`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn’t store anything at all (except virtual pointer `vptr`) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).
`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface.
@ -120,9 +120,9 @@ There are ordinary functions and aggregate functions. For aggregate functions, s
Ordinary functions don’t change the number of rows; they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`s of data to implement vectorized query execution.
There are some miscellaneous functions, like [blockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), and [runningAccumulate](../sql-reference/functions/other-functions.md#runningaccumulate) that exploit block processing and violate the independence of rows.
There are some miscellaneous functions, like [blockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), and \[runningAccumulate\](../sql-reference/functions/other-functions.md#runningaccumulate) that exploit block processing and violate the independence of rows.
ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn't support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn’t support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
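For example, a tiny query along these lines (an illustrative sketch, not taken from the codebase) relies on such overloads rather than on implicit conversion:

``` sql
-- `plus` has a dedicated instantiation for UInt8 + Float32; no implicit cast is inserted.
SELECT toUInt8(1) + toFloat32(2.5) AS mixed_sum, toTypeName(mixed_sum) AS result_type;
```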
Implementing a function may be slightly inconvenient because a function explicitly dispatches supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and constant or non-constant left and right arguments.
@ -169,13 +169,13 @@ There is no global query plan for distributed query execution. Each node has its
`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in the primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate by many columns, you get values for the corresponding rows.
The primary key itself is “sparse”. It doesn't address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached.
The primary key itself is “sparse”. It doesn’t address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached.
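A minimal sketch of how that granularity is declared when creating a table (the table and columns are hypothetical):

``` sql
-- One primary.idx entry and one mark per 8192 rows; column data lives in per-column .bin files.
CREATE TABLE hits_sample
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt64
)
ENGINE = MergeTree
ORDER BY (CounterID, EventDate)
SETTINGS index_granularity = 8192;
```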
When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table.
When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.
`MergeTree` is not an LSM tree because it doesn't contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently: about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
`MergeTree` is not an LSM tree because it doesn’t contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently: about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
> MergeTree tables can only have one (primary) index: there aren’t any secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order or even to allow representations with pre-aggregated data along with original data.
@ -187,7 +187,7 @@ Replication in ClickHouse can be configured on a per-table basis. You could have
Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically simply by creating or dropping a table.
Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn't support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails.
Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails.
Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. It is achieved by electing one replica as the leader, and that replica initiates merges and writes “merge parts” actions to the log.

View File

@ -5,11 +5,11 @@ toc_priority: 25
toc_title: hidden
---
# ClickHouse Engines
# ClickHouse Engines {#clickhouse-engines}
There are two key engine kinds in ClickHouse:
- [Table engines](table-engines/index.md)
- [Database engines](database-engines/index.md)
- [Table engines](../engines/table-engines/index.md)
- [Database engines](../engines/database-engines/index.md)
{## [Original article](https://clickhouse.tech/docs/en/engines/) ##}

View File

@ -3,14 +3,14 @@ toc_folder_title: Integrations
toc_priority: 30
---
# Table Engines for Integrations
# Table Engines for Integrations {#table-engines-for-integrations}
ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like external dictionaries or table functions, which require using custom query methods on each use.
List of supported integrations:
- [ODBC](odbc.md)
- [JDBC](jdbc.md)
- [MySQL](mysql.md)
- [HDFS](hdfs.md)
- [Kafka](kafka.md)
- [ODBC](../../../engines/table-engines/integrations/odbc.md)
- [JDBC](../../../engines/table-engines/integrations/jdbc.md)
- [MySQL](../../../engines/table-engines/integrations/mysql.md)
- [HDFS](../../../engines/table-engines/integrations/hdfs.md)
- [Kafka](../../../engines/table-engines/integrations/kafka.md)

View File

@ -18,7 +18,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
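For instance, a table proxying a remote MySQL table might be declared like this (host, database, credentials, and columns are illustrative placeholders):

``` sql
CREATE TABLE mysql_orders
(
    id UInt64,
    amount Float64
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'orders', 'reader', 'secret');
```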
The table structure can differ from the original MySQL table structure:

View File

@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
ENGINE = ODBC(connection_settings, external_database, external_table)
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the source table structure:

View File

@ -1,6 +1,45 @@
---
toc_folder_title: Log Family
toc_priority: 29
toc_title: Introduction
---
# Log Engine Family {#log-engine-family}
These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole.
Engines of the family:
- [StripeLog](../../../engines/table-engines/log-family/stripelog.md)
- [Log](../../../engines/table-engines/log-family/log.md)
- [TinyLog](../../../engines/table-engines/log-family/tinylog.md)
## Common Properties {#common-properties}
Engines:
- Store data on a disk.
- Append data to the end of file when writing.
- Support locks for concurrent data access.
During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently.
- Do not support [mutation](../../../sql-reference/statements/alter.md#alter-mutations) operations.
- Do not support indexes.
This means that `SELECT` queries for ranges of data are not efficient.
- Do not write data atomically.
You can get a table with corrupted data if something breaks the write operation, for example, abnormal server shutdown.
## Differences {#differences}
The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine doesn’t support parallel data reading by several threads in a single query. It reads data slower than other engines in the family that support parallel reading from a single query and it uses almost as many file descriptors as the `Log` engine because it stores each column in a separate file. Use it only in simple scenarios.
The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer file descriptors, but the `Log` engine provides higher efficiency when reading data.
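A short sketch of the intended write-once, read-as-a-whole usage (table name and data are hypothetical):

``` sql
CREATE TABLE batch_log (ts DateTime, message String) ENGINE = StripeLog;
INSERT INTO batch_log VALUES (now(), 'first event'), (now(), 'second event');
SELECT * FROM batch_log;  -- small table, read back as a whole
```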
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) <!--hide-->

View File

@ -1,44 +0,0 @@
---
toc_priority: 31
toc_title: Introduction
---
# Log Engine Family {#log-engine-family}
These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole.
Engines of the family:
- [StripeLog](../../../engines/table-engines/log-family/stripelog.md)
- [Log](../../../engines/table-engines/log-family/log.md)
- [TinyLog](../../../engines/table-engines/log-family/tinylog.md)
## Common Properties {#common-properties}
Engines:
- Store data on a disk.
- Append data to the end of file when writing.
- Support locks for concurrent data access.
During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently.
- Do not support [mutation](../../../sql-reference/statements/alter.md#alter-mutations) operations.
- Do not support indexes.
This means that `SELECT` queries for ranges of data are not efficient.
- Do not write data atomically.
You can get a table with corrupted data if something breaks the write operation, for example, abnormal server shutdown.
## Differences {#differences}
The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine doesn’t support parallel data reading by several threads. It reads data slower than other engines in the family that support parallel reading and it uses almost as many descriptors as the `Log` engine because it stores each column in a separate file. Use it in simple low-load scenarios.
The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer descriptors in the operating system, but the `Log` engine provides higher efficiency when reading data.
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) <!--hide-->

View File

@ -5,7 +5,7 @@ toc_title: Log
# Log {#log}
Engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/log-family.md) article.
Engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article.
Log differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of “marks” resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.

View File

@ -5,7 +5,7 @@ toc_title: StripeLog
# Stripelog {#stripelog}
This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/log-family.md) article.
This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article.
Use this engine in scenarios when you need to write many tables with a small amount of data (less than 1 million rows).
@ -20,7 +20,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = StripeLog
```
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
## Writing the Data {#table_engines-stripelog-writing-the-data}

View File

@ -5,7 +5,7 @@ toc_title: TinyLog
# TinyLog {#tinylog}
The engine belongs to the log engine family. See [Log Engine Family](../../../engines/table-engines/log-family/log-family.md) for common properties of log engines and their differences.
The engine belongs to the log engine family. See [Log Engine Family](../../../engines/table-engines/log-family/index.md) for common properties of log engines and their differences.
This table engine is typically used with the write-once method: write data one time, then read it as many times as necessary. For example, you can use `TinyLog`-type tables for intermediary data that is processed in small batches. Note that storing data in a large number of small tables is inefficient.

View File

@ -32,7 +32,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
For a description of request parameters, see [request description](../../../sql-reference/statements/create.md).
For a description of request parameters, see [request description](../../../sql-reference/statements/create/table.md).
**Query clauses**

View File

@ -26,7 +26,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
For a description of query parameters, see [query description](../../../sql-reference/statements/create.md).
For a description of query parameters, see [query description](../../../sql-reference/statements/create/table.md).
**CollapsingMergeTree Parameters**

View File

@ -77,7 +77,7 @@ Let’s break down the name of the first part: `201901_1_3_1`:
The `active` column shows the status of the part. `1` is active; `0` is inactive. The inactive parts are, for example, source parts remaining after merging to a larger part. The corrupted data parts are also indicated as inactive.
As you can see in the example, there are several separated parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) query. Example:
As you can see in the example, there are several separated parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query. Example:
``` sql
OPTIMIZE TABLE visits PARTITION 201902;

View File

@ -28,7 +28,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
A table for the Graphite data should have the following columns for the following data:

View File

@ -1,6 +1,17 @@
---
toc_folder_title: MergeTree Family
toc_priority: 28
toc_title: Introduction
---
# MergeTree Engine Family {#mergetree-engine-family}
Table engines from the MergeTree family are the core of ClickHouse data storage capabilities. They provide most features for resilience and high-performance data retrieval: columnar storage, custom partitioning, sparse primary index, secondary data-skipping indexes, etc.
Base [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) table engine can be considered the default table engine for single-node ClickHouse instances because it is versatile and practical for a wide range of use cases.
For production usage [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) is the way to go, because it adds high availability to all the features of the regular MergeTree engine. A bonus is automatic data deduplication on data ingestion, so the software can safely retry if there was some network issue during insert.
All other engines of the MergeTree family add extra functionality for some specific use cases. Usually, it’s implemented as additional data manipulation in the background.
The main downside of MergeTree engines is that they are rather heavyweight, so the typical pattern is not to have too many of them. If you need many small tables, for example for temporary data, consider the [Log engine family](../../../engines/table-engines/log-family/index.md).
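A sketch of the replicated setup mentioned above (the ZooKeeper path and the `{shard}`/`{replica}` macros are placeholders that depend on server configuration):

``` sql
CREATE TABLE events_replicated
(
    EventDate Date,
    EventID UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
ORDER BY (EventDate, EventID);
```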

View File

@ -49,7 +49,7 @@ ORDER BY expr
[SETTINGS name=value, ...]
```
For a description of parameters, see the [CREATE query description](../../../sql-reference/statements/create.md).
For a description of parameters, see the [CREATE query description](../../../sql-reference/statements/create/table.md).
### Query Clauses {#mergetree-query-clauses}
@ -482,7 +482,7 @@ Data with an expired TTL is removed when ClickHouse merges data parts.
When ClickHouse sees that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set `merge_with_ttl_timeout`. If the value is too low, it will perform many off-schedule merges that may consume a lot of resources.
If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) query before `SELECT`.
If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query before `SELECT`.
## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes}

View File

@ -27,7 +27,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
For a description of request parameters, see [request description](../../../sql-reference/statements/create.md).
For a description of request parameters, see [statement description](../../../sql-reference/statements/create/table.md).
**ReplacingMergeTree Parameters**

View File

@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
For a description of request parameters, see [request description](../../../sql-reference/statements/create.md).
For a description of request parameters, see [request description](../../../sql-reference/statements/create/table.md).
**Parameters of SummingMergeTree**

View File

@ -29,7 +29,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```
For a description of query parameters, see the [query description](../../../sql-reference/statements/create.md).
For a description of query parameters, see the [query description](../../../sql-reference/statements/create/table.md).
**Engine Parameters**

View File

@ -28,7 +28,7 @@ ClickHouse does not allow specifying the filesystem path for `File`. It will use fold
When creating a table using `File(Format)`, it creates an empty subdirectory in that folder. When data is written to that table, it’s put into a `data.Format` file in that subdirectory.
You may manually create this subfolder and file in the server filesystem and then [ATTACH](../../../sql-reference/statements/misc.md) it to the table information with a matching name, so you can query data from that file.
You may manually create this subfolder and file in the server filesystem and then [ATTACH](../../../sql-reference/statements/attach.md) it to the table information with a matching name, so you can query data from that file.
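A sketch of that manual flow (table name, schema, and format are hypothetical; the data file is assumed to have been placed in the table's data subdirectory beforehand):

``` sql
-- Assumes data.CSV already exists under the server's data directory for this table.
ATTACH TABLE external_csv (id UInt32, name String) ENGINE = File(CSV);
SELECT count() FROM external_csv;
```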
!!! warning "Warning"
Be careful with this functionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined.

View File

@ -3,7 +3,7 @@ toc_folder_title: Special
toc_priority: 31
---
# Special Table Engines
# Special Table Engines {#special-table-engines}
There are three main categories of table engines:

View File

@ -20,7 +20,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
```
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query.
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
**Engine Parameters**

View File

@ -5,6 +5,6 @@ toc_title: MaterializedView
# MaterializedView Table Engine {#materializedview}
Used for implementing materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine.
Used for implementing materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create/table.md)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine.
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) <!--hide-->

View File

@ -17,7 +17,7 @@ Indexes
: ClickHouse keeps data structures in memory that allow reading only the used columns, and only the necessary row ranges of those columns.
Data compression
: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same value, or only a few different values, for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create.md#create-query-specialized-codecs) that can make data even more compact.
: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same value, or only a few different values, for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.md#create-query-specialized-codecs) that can make data even more compact.
Vectorized query execution
: ClickHouse not only stores data in columns but also processes data in columns. It leads to better CPU cache utilization and allows for [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instructions usage.

View File

@ -8,8 +8,8 @@ toc_priority: 101
ClickHouse is a generic data storage solution for [OLAP](../../faq/general/olap.md) workloads, while there are many specialized time-series database management systems. Nevertheless, ClickHouse’s [focus on query execution speed](../../faq/general/why-clickhouse-is-so-fast.md) allows it to outperform specialized systems in many cases. There are many independent benchmarks on this topic out there ([example](https://medium.com/@AltinityDB/clickhouse-for-time-series-scalability-benchmarks-e181132a895b)), so we’re not going to conduct one here. Instead, let’s focus on ClickHouse features that are important to use if that’s your use case.
First of all, there are **[specialized codecs](../../sql-reference/statements/create.md#create-query-specialized-codecs)** which make typical time-series data more compact: either common algorithms like `DoubleDelta` and `Gorilla`, or ones specific to ClickHouse like `T64`.
First of all, there are **[specialized codecs](../../sql-reference/statements/create/table.md#create-query-specialized-codecs)** which make typical time-series data more compact: either common algorithms like `DoubleDelta` and `Gorilla`, or ones specific to ClickHouse like `T64`.
Second, time-series queries often hit only recent data, like one day or one week old. It makes sense to use servers that have both fast nVME/SSD drives and high-capacity HDD drives. ClickHouse [TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) feature allows to configure keeping fresh hot data on fast drives and gradually move it to slower drives as it ages. Rollup or removal of even older data is also possible if your requirements demand it.
Even though its against ClickHouse philosophy of storing and processing raw data, you can use [materialized views](../../sql-reference/statements/create.md#create-view) to fit into even tighter latency or costs requirements.
Even though it’s against the ClickHouse philosophy of storing and processing raw data, you can use [materialized views](../../sql-reference/statements/create/view.md) to fit into even tighter latency or cost requirements.
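A combined sketch of the codec and TTL features mentioned above (the storage policy and volume names are placeholders that depend on server configuration):

``` sql
CREATE TABLE metrics
(
    ts    DateTime CODEC(DoubleDelta, LZ4),
    value Float64  CODEC(Gorilla)
)
ENGINE = MergeTree
ORDER BY ts
TTL ts + INTERVAL 1 WEEK TO VOLUME 'cold'
SETTINGS storage_policy = 'tiered';
```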

View File

@ -98,9 +98,9 @@ To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.doc
For non-Linux operating systems and for the AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours' delay).
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub.

View File

@ -99,7 +99,7 @@ As in most database management systems, ClickHouse logically groups tables into
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
```
Syntax for creating tables is way more complicated compared to databases (see [reference](../sql-reference/statements/create.md)). In general, the `CREATE TABLE` statement has to specify three key things:
Syntax for creating tables is way more complicated compared to databases (see [reference](../sql-reference/statements/create/table.md)). In general, the `CREATE TABLE` statement has to specify three key things:
1. Name of table to create.
2. Table schema, i.e. list of columns and their [data types](../sql-reference/data-types/index.md).
@ -479,7 +479,7 @@ FORMAT TSV
max_insert_block_size 1048576 0 "The maximum block size for insertion, if we control the creation of blocks for insertion."
```
Optionally you can [OPTIMIZE](../sql-reference/statements/misc.md#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later:
Optionally you can [OPTIMIZE](../sql-reference/statements/optimize.md) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later:
``` bash
clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"

View File

@ -979,7 +979,7 @@ message MessageType {
}
```
are not applied; the [table defaults](../sql-reference/statements/create.md#create-default-values) are used instead of them.
are not applied; the [table defaults](../sql-reference/statements/create/table.md#create-default-values) are used instead of them.
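A sketch of what such a default looks like on the table side (table and columns are hypothetical); a Protobuf message that omits `flag` would get the declared default:

``` sql
CREATE TABLE proto_input
(
    id   UInt64,
    flag UInt8 DEFAULT 0
)
ENGINE = MergeTree
ORDER BY id;
```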
ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
This means that each message should be preceded by its length, written as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).

View File

@ -367,34 +367,34 @@ $ curl -v 'http://localhost:8123/predefined_query'
As you can see from the example, `http_handlers` is configured in the config.xml file and can contain many `rules`. ClickHouse will match the received HTTP requests to the predefined type in `rule`, and the first match runs the handler. Then ClickHouse will execute the corresponding predefined query if the match is successful.
Now `rule` can configure `method`, `headers`, `url`, `handler`:
- `method` is responsible for matching the method part of the HTTP request. `method` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method portion of the HTTP request.
- `method` is responsible for matching the method part of the HTTP request. `method` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method portion of the HTTP request.
- `url` is responsible for matching the URL part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2)s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the URL portion of the HTTP request.
- `url` is responsible for matching the URL part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2)s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the URL portion of the HTTP request.
- `headers` are responsible for matching the header part of the HTTP request. It is compatible with RE2s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request.
- `headers` are responsible for matching the header part of the HTTP request. It is compatible with RE2s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request.
- `handler` contains the main processing part. Now `handler` can configure `type`, `status`, `content_type`, `response_content`, `query`, `query_param_name`.
`type` currently supports three types: [predefined\_query\_handler](#predefined_query_handler), [dynamic\_query\_handler](#dynamic_query_handler), [static](#static).
- `handler` contains the main processing part. Now `handler` can configure `type`, `status`, `content_type`, `response_content`, `query`, `query_param_name`.
`type` currently supports three types: [predefined_query_handler](#predefined_query_handler), [dynamic_query_handler](#dynamic_query_handler), [static](#static).
- `query` — use with `predefined_query_handler` type, executes query when the handler is called.
- `query_param_name` — use with `dynamic_query_handler` type, extracts and executes the value corresponding to the `query_param_name` value in HTTP request params.
- `status` — use with `static` type, response status code.
- `content_type` — use with `static` type, response [content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
- `response_content` — use with `static` type; the response content sent to the client. When the prefix `file://` or `config://` is used, the content is read from the file or from the configuration and sent to the client.
Next are the configuration methods for different `type`.
### predefined_query_handler {#predefined_query_handler}
### predefined\_query\_handler {#predefined_query_handler}
`predefined_query_handler` supports setting `Settings` and `query_params` values. You can configure `query` in the type of `predefined_query_handler`.
The `query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched, and the result of the query is returned. It is a required configuration.
The following example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
The following example defines the values of [max\_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
Example:
@ -425,13 +425,13 @@ max_alter_threads 2
!!! note "caution"
One `predefined_query_handler` supports only one `query` of an insert type.
### dynamic_query_handler {#dynamic_query_handler}
### dynamic\_query\_handler {#dynamic_query_handler}
In `dynamic_query_handler`, the query is written in the form of a parameter of the HTTP request. The difference is that in `predefined_query_handler`, the query is written in the configuration file. You can configure `query_param_name` in `dynamic_query_handler`.
ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.
To experiment with this functionality, the example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` and `queries` whether the settings were set successfully.
To experiment with this functionality, the example defines the values of [max\_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` and `queries` whether the settings were set successfully.
Example:
@ -456,7 +456,7 @@ max_alter_threads 2
### static {#static}
`static` can return [content_type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type), [status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) and `response_content`. `response_content` can return the specified content.
`static` can return [content\_type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type), [status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) and `response_content`. `response_content` can return the specified content.
Example:

View File

@ -3,15 +3,14 @@ toc_folder_title: Third-Party
toc_priority: 24
---
# Third-Party Interfaces
# Third-Party Interfaces {#third-party-interfaces}
This is a collection of links to third-party tools that provide some sort of interface to ClickHouse. It can be either visual interface, command-line interface or an API:
- [Client libraries](client-libraries.md)
- [Integrations](integrations.md)
- [GUI](gui.md)
- [Proxies](proxy.md)
- [Client libraries](../../interfaces/third-party/client-libraries.md)
- [Integrations](../../interfaces/third-party/integrations.md)
- [GUI](../../interfaces/third-party/gui.md)
- [Proxies](../../interfaces/third-party/proxy.md)
!!! note "Note"
Generic tools that support common API like [ODBC](../../interfaces/odbc.md) or [JDBC](../../interfaces/jdbc.md) usually can work with ClickHouse as well, but are not listed here because there are way too many of them.

View File

@ -104,6 +104,5 @@ toc_title: Integrations
- [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
- [GraphQL](https://github.com/graphql)
- [activecube-graphql](https://github.com/bitquery/activecube-graphql)
[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->

View File

@ -50,7 +50,7 @@ toc_title: Adopters
| <a href="http://www.pragma-innovation.fr/" class="favicon">Pragma Innovation</a> | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
| <a href="https://www.qingcloud.com/" class="favicon">QINGCLOUD</a> | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
| <a href="https://qrator.net" class="favicon">Qrator</a> | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
| <a href="https://rambler.ru" class="favicon">Rambler</a> | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |

View File

@ -17,11 +17,11 @@ Its also worth noting that ClickHouse is a database management system, not a
Some column-oriented DBMSs do not use data compression. However, data compression does play a key role in achieving excellent performance.
In addition to efficient general-purpose compression codecs with different trade-offs between disk space and CPU consumption, ClickHouse provides [specialized codecs](../sql-reference/statements/create.md#create-query-specialized-codecs) for specific kinds of data, which allow ClickHouse to compete with and outperform more niche databases, like time-series ones.
In addition to efficient general-purpose compression codecs with different trade-offs between disk space and CPU consumption, ClickHouse provides [specialized codecs](../sql-reference/statements/create/table.md#create-query-specialized-codecs) for specific kinds of data, which allow ClickHouse to compete with and outperform more niche databases, like time-series ones.
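A minimal sketch of how per-column specialized codecs are declared, chained with a general-purpose codec (the table and column names here are hypothetical, not taken from the documentation):

``` sql
-- Hypothetical table: specialized codecs chained with a general-purpose codec per column.
CREATE TABLE sensor_readings
(
    ts DateTime CODEC(DoubleDelta, LZ4),
    value Float64 CODEC(Gorilla, ZSTD)
)
ENGINE = MergeTree()
ORDER BY ts;
```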
## Disk Storage of Data {#disk-storage-of-data}
Keeping data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis.
Keeping data physically sorted by primary key makes it possible to extract data for its specific values or value ranges with low latency, less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis.
ClickHouse is designed to work on regular hard drives, which means the cost per GB of data storage is low, but SSD and additional RAM are also fully used if available.
@ -57,11 +57,11 @@ Having a data physically sorted by primary key makes it possible to extract data
## Secondary Indexes {#secondary-indexes}
Unlike other database management systems, secondary indexes in ClickHouse do not point to specific rows or row ranges. Instead, they allow the database to know in advance that all rows in some data parts wouldn't match the query filtering conditions and do not read them at all, thus they are called [data skipping indexes](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
Unlike other database management systems, secondary indexes in ClickHouse do not point to specific rows or row ranges. Instead, they allow the database to know in advance that all rows in some data parts wouldn’t match the query filtering conditions and do not read them at all, thus they are called [data skipping indexes](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
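A minimal sketch of a data skipping index, assuming a hypothetical table; the `minmax` index lets ClickHouse skip granules whose value range cannot satisfy the filter:

``` sql
-- Hypothetical table with a minmax data skipping index.
CREATE TABLE events
(
    dt Date,
    user_id UInt64,
    duration_ms UInt32,
    INDEX duration_idx duration_ms TYPE minmax GRANULARITY 4
)
ENGINE = MergeTree()
ORDER BY (dt, user_id);

-- Granules where max(duration_ms) <= 10000 are skipped without being read.
SELECT count() FROM events WHERE duration_ms > 10000;
```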
## Suitable for Online Queries {#suitable-for-online-queries}
Most OLAP database management systems don't aim for online queries with sub-second latencies. In alternative systems, report building time of tens of seconds or even minutes is often considered acceptable. Sometimes it takes even longer, which forces reports to be prepared offline (in advance or by responding with "come back later").
Most OLAP database management systems don’t aim for online queries with sub-second latencies. In alternative systems, report building time of tens of seconds or even minutes is often considered acceptable. Sometimes it takes even longer, which forces reports to be prepared offline (in advance or by responding with “come back later”).
In ClickHouse low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the same moment while the user interface page is loading. In other words, online.
@ -73,9 +73,9 @@ ClickHouse provides various ways to trade accuracy for performance:
2. Running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk.
3. Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources.
## Adaptive Join Algorithm
## Adaptive Join Algorithm {#adaptive-join-algorithm}
ClickHouse adaptively chooses how to [JOIN](../sql-reference/statements/select/join.md) multiple tables, by preferring hash-join algorithm and falling back to the merge-join algorithm if there's more than one large table.
ClickHouse adaptively chooses how to [JOIN](../sql-reference/statements/select/join.md) multiple tables, by preferring hash-join algorithm and falling back to the merge-join algorithm if theres more than one large table.
## Data Replication and Data Integrity Support {#data-replication-and-data-integrity-support}
@ -83,9 +83,9 @@ ClickHouse uses asynchronous multi-master replication. After being written to an
For more information, see the section [Data replication](../engines/table-engines/mergetree-family/replication.md).
## Role-Based Access Control
## Role-Based Access Control {#role-based-access-control}
ClickHouse implements user account management using SQL queries and allows for [role-based access control configuration](../operations/access-rights.md) similar to what can be found in ANSI SQL standard and popular relational database management systems.
ClickHouse implements user account management using SQL queries and allows for [role-based access control configuration](../operations/access-rights.md) similar to what can be found in ANSI SQL standard and popular relational database management systems.
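A minimal sketch of SQL-driven access control (the role, user, and database names are hypothetical):

``` sql
-- Create a role, grant it read access to a database, then assign it to a new user.
CREATE ROLE analyst;
GRANT SELECT ON analytics.* TO analyst;
CREATE USER jane IDENTIFIED WITH sha256_password BY 'secret';
GRANT analyst TO jane;
```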
## Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages}

View File

@ -58,9 +58,9 @@ Privileges can be granted to a user account by the [GRANT](../sql-reference/stat
Management queries:
- [CREATE USER](../sql-reference/statements/create.md#create-user-statement)
- [CREATE USER](../sql-reference/statements/create/user.md)
- [ALTER USER](../sql-reference/statements/alter.md#alter-user-statement)
- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
- [DROP USER](../sql-reference/statements/drop.md)
- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)
### Settings Applying {#access-control-settings-applying}
@ -84,11 +84,11 @@ Role contains:
Management queries:
- [CREATE ROLE](../sql-reference/statements/create.md#create-role-statement)
- [CREATE ROLE](../sql-reference/statements/create/role.md)
- [ALTER ROLE](../sql-reference/statements/alter.md#alter-role-statement)
- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
- [SET DEFAULT ROLE](../sql-reference/statements/misc.md#set-default-role-statement)
- [DROP ROLE](../sql-reference/statements/drop.md)
- [SET ROLE](../sql-reference/statements/set-role.md)
- [SET DEFAULT ROLE](../sql-reference/statements/set-role.md#set-default-role-statement)
- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement)
Privileges can be granted to a role by the [GRANT](../sql-reference/statements/grant.md) query. To revoke privileges from a role ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query.
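A minimal sketch of granting and revoking a privilege on a role, assuming a role named `analyst` and a table `analytics.events` already exist:

``` sql
-- Grant INSERT on one table to the role, then revoke it again.
GRANT INSERT ON analytics.events TO analyst;
REVOKE INSERT ON analytics.events FROM analyst;
```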
@ -99,9 +99,9 @@ Row policy is a filter that defines which of the rows are available to a user or
Management queries:
- [CREATE ROW POLICY](../sql-reference/statements/create.md#create-row-policy-statement)
- [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md)
- [ALTER ROW POLICY](../sql-reference/statements/alter.md#alter-row-policy-statement)
- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
- [DROP ROW POLICY](../sql-reference/statements/drop.md#drop-row-policy-statement)
- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
## Settings Profile {#settings-profiles-management}
@ -110,9 +110,9 @@ Settings profile is a collection of [settings](../operations/settings/index.md).
Management queries:
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create.md#create-settings-profile-statement)
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create/settings-profile.md#create-settings-profile-statement)
- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter.md#alter-settings-profile-statement)
- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
- [DROP SETTINGS PROFILE](../sql-reference/statements/drop.md#drop-settings-profile-statement)
- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
## Quota {#quotas-management}
@ -123,9 +123,9 @@ Quota contains a set of limits for some durations, as well as a list of roles an
Management queries:
- [CREATE QUOTA](../sql-reference/statements/create.md#create-quota-statement)
- [CREATE QUOTA](../sql-reference/statements/create/quota.md)
- [ALTER QUOTA](../sql-reference/statements/alter.md#alter-quota-statement)
- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
- [DROP QUOTA](../sql-reference/statements/drop.md#drop-quota-statement)
- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
## Enabling SQL-driven Access Control and Account Management {#enabling-access-control}

View File

@ -1,9 +1,9 @@
---
toc_folder_title: Optimizing Performance
toc_priority: 52
toc_hidden: true
toc_priority: 52
---
# Optimizing Performance
# Optimizing Performance {#optimizing-performance}
- [Sampling query profiler](sampling-query-profiler.md)
- [Sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md)

View File

@ -348,7 +348,7 @@ Keys:
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog`.
## send_crash_reports {#server_configuration_parameters-logger}
## send\_crash\_reports {#server_configuration_parameters-logger}
Settings for opt-in sending crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io).
Enabling it, especially in pre-production environments, is greatly appreciated.
@ -398,8 +398,7 @@ The cache is shared for the server and memory is allocated as needed. The cache
<mark_cache_size>5368709120</mark_cache_size>
```
## max_server_memory_usage {#max_server_memory_usage}
## max\_server\_memory\_usage {#max_server_memory_usage}
Limits total RAM usage by the ClickHouse server. You can specify it only for the default profile.
@ -416,8 +415,7 @@ On hosts with low RAM and swap, you possibly need setting `max_server_memory_usa
**See also**
- [max_memory_usage](../settings/query-complexity.md#settings_max_memory_usage)
- [max\_memory\_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage)
## max\_concurrent\_queries {#max-concurrent-queries}

View File

@ -36,7 +36,7 @@ Memory usage is not monitored for the states of certain aggregate functions.
Memory usage is not fully tracked for states of the aggregate functions `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` from `String` and `Array` arguments.
Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and [max_server_memory_usage](../server-configuration-parameters/settings.md#max_server_memory_usage).
Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and [max\_server\_memory\_usage](../../operations/server-configuration-parameters/settings.md#max_server_memory_usage).
## max\_memory\_usage\_for\_user {#max-memory-usage-for-user}
@ -46,7 +46,6 @@ Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHo
See also the description of [max\_memory\_usage](#settings_max_memory_usage).
## max\_rows\_to\_read {#max-rows-to-read}
The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.

View File

@ -129,7 +129,7 @@ Default value: 0.
## max\_http\_get\_redirects {#setting-max_http_get_redirects}
Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table-engines/special/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../sql-reference/statements/create.md#create-table-query) query and by the [url](../../sql-reference/table-functions/url.md) table function.
Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table-engines/special/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../sql-reference/statements/create/table.md) query and by the [url](../../sql-reference/table-functions/url.md) table function.
Possible values:
@ -733,8 +733,8 @@ Limits maximum recursion depth in the recursive descent parser. Allows to contro
Possible values:
- Positive integer.
- 0 — Recursion depth is unlimited.
- Positive integer.
- 0 — Recursion depth is unlimited.
Default value: 1000.
@ -1427,20 +1427,20 @@ Possible values:
Default value: 16.
## always_fetch_merged_part {#always_fetch_merged_part}
## always\_fetch\_merged\_part {#always_fetch_merged_part}
Prohibits data parts merging in [Replicated*MergeTree](../../engines/table-engines/mergetree-family/replication.md)-engine tables.
Prohibits data parts merging in [Replicated\*MergeTree](../../engines/table-engines/mergetree-family/replication.md)-engine tables.
When merging is prohibited, the replica never merges parts and always downloads merged parts from other replicas. If there is no required data yet, the replica waits for it. CPU and disk load on the replica server decreases, but the network load on cluster increases. This setting can be useful on servers with relatively weak CPUs or slow disks, such as servers for backups storage.
Possible values:
- 0 — `Replicated*MergeTree`-engine tables merge data parts at the replica.
- 1 — `Replicated*MergeTree`-engine tables don't merge data parts at the replica. The tables download merged data parts from other replicas.
- 1 — `Replicated*MergeTree`-engine tables dont merge data parts at the replica. The tables download merged data parts from other replicas.
Default value: 0.
**See Also**
**See Also**
- [Data Replication](../../engines/table-engines/mergetree-family/replication.md)
@ -1454,11 +1454,11 @@ Possible values:
Default value: 16.
## transform_null_in {#transform_null_in}
## transform\_null\_in {#transform_null_in}
Enables equality of [NULL](../../sql-reference/syntax.md#null-literal) values for [IN](../../sql-reference/operators/in.md) operator.
By default, `NULL` values can't be compared because `NULL` means undefined value. Thus, comparison `expr = NULL` must always return `false`. With this setting `NULL = NULL` returns `true` for `IN` operator.
By default, `NULL` values cant be compared because `NULL` means undefined value. Thus, comparison `expr = NULL` must always return `false`. With this setting `NULL = NULL` returns `true` for `IN` operator.
Possible values:
@ -1467,11 +1467,11 @@ Possible values:
Default value: 0.
**Example**
**Example**
Consider the `null_in` table:
```text
``` text
┌──idx─┬─────i─┐
│ 1 │ 1 │
│ 2 │ NULL │
@ -1481,13 +1481,13 @@ Consider the `null_in` table:
Query:
```sql
``` sql
SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 0;
```
Result:
```text
``` text
┌──idx─┬────i─┐
│ 1 │ 1 │
└──────┴──────┘
@ -1495,24 +1495,23 @@ Result:
Query:
```sql
``` sql
SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;
```
Result:
```text
``` text
┌──idx─┬─────i─┐
│ 1 │ 1 │
│ 2 │ NULL │
└──────┴───────┘
```
**See Also**
**See Also**
- [NULL Processing in IN Operators](../../sql-reference/operators/in.md#in-null-processing)
## low\_cardinality\_max\_dictionary\_size {#low_cardinality_max_dictionary_size}
Sets a maximum size in rows of a shared global dictionary for the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type that can be written to a storage file system. This setting prevents issues with RAM in case of unlimited dictionary growth. All the data that can’t be encoded due to the maximum dictionary size limitation is written by ClickHouse in an ordinary way (without dictionary encoding).
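A minimal sketch of adjusting this setting for a session, assuming a hypothetical table with a `LowCardinality(String)` column:

``` sql
-- Raise the shared dictionary limit before inserting into a LowCardinality column.
SET low_cardinality_max_dictionary_size = 1000000;
CREATE TABLE tags (id UInt64, tag LowCardinality(String)) ENGINE = MergeTree() ORDER BY id;
INSERT INTO tags VALUES (1, 'red'), (2, 'green');
```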
@ -1570,9 +1569,9 @@ Possible values:
Default value: 0.
## min_insert_block_size_rows_for_materialized_views {#min-insert-block-size-rows-for-materialized-views}
## min\_insert\_block\_size\_rows\_for\_materialized\_views {#min-insert-block-size-rows-for-materialized-views}
Sets minimum number of rows in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create.md#create-view). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
Sets minimum number of rows in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create/view.md). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
Possible values:
@ -1583,11 +1582,11 @@ Default value: 1048576.
**See Also**
- [min_insert_block_size_rows](#min-insert-block-size-rows)
- [min\_insert\_block\_size\_rows](#min-insert-block-size-rows)
## min_insert_block_size_bytes_for_materialized_views {#min-insert-block-size-bytes-for-materialized-views}
## min\_insert\_block\_size\_bytes\_for\_materialized\_views {#min-insert-block-size-bytes-for-materialized-views}
Sets minimum number of bytes in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create.md#create-view). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
Sets minimum number of bytes in block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create/view.md). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.
Possible values:
@ -1598,6 +1597,27 @@ Default value: 268435456.
**See also**
- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
- [min\_insert\_block\_size\_bytes](#min-insert-block-size-bytes)
## output_format_pretty_grid_charset {#output-format-pretty-grid-charset}
Allows changing the charset which is used for printing grid borders. Available charsets are the following: UTF-8, ASCII.
**Example**
```text
SET output_format_pretty_grid_charset = 'UTF-8';
SELECT * FROM a;
┌─a─┐
│ 1 │
└───┘
SET output_format_pretty_grid_charset = 'ASCII';
SELECT * FROM a;
+-a-+
| 1 |
+---+
```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

View File

@ -1,3 +1,3 @@
## system.asynchronous\_metric\_log {#system-tables-async-log}
Contains the historical values for `system.asynchronous_log` (see [system.asynchronous\_metrics](asynchronous_metrics.md#system_tables-asynchronous_metrics))
Contains the historical values for `system.asynchronous_log` (see [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))

View File

@ -31,6 +31,6 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
**See Also**
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](events.md#system_tables-events) — Contains a number of events that have occurred.
- [system.metric\_log](metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.

View File

@ -4,6 +4,6 @@ Contains information about detached parts of [MergeTree](../../engines/table-eng
For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../../sql-reference/statements/alter.md#alter_attach-partition) command.
For the description of other columns, see [system.parts](parts.md#system_tables-parts).
For the description of other columns, see [system.parts](../../operations/system-tables/parts.md#system_tables-parts).
If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter.md#alter_drop-detached).

View File

@ -26,7 +26,7 @@ SELECT * FROM system.events LIMIT 5
**See Also**
- [system.asynchronous\_metrics](asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.metrics](metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.metric\_log](metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.

View File

@ -20,7 +20,7 @@ System tables:
Most of system tables store their data in RAM. A ClickHouse server creates such system tables at the start.
Unlike other system tables, the system tables [metric\_log](metric_log.md#system_tables-metric_log), [query\_log](query_log.md#system_tables-query_log), [query\_thread\_log](query_thread_log.md#system_tables-query_thread_log), [trace\_log](trace_log.md#system_tables-trace_log) are served by [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine and store their data in a storage filesystem. If you remove a table from a filesystem, the ClickHouse server creates the empty one again at the time of the next data writing. If system table schema changed in a new release, then ClickHouse renames the current table and creates a new one.
Unlike other system tables, the system tables [metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log), [query\_log](../../operations/system-tables/query_log.md#system_tables-query_log), [query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log), [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) are served by [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine and store their data in a storage filesystem. If you remove a table from a filesystem, the ClickHouse server creates the empty one again at the time of the next data writing. If system table schema changed in a new release, then ClickHouse renames the current table and creates a new one.
By default, table growth is unlimited. To control a size of a table, you can use [TTL](../../sql-reference/statements/alter.md#manipulations-with-table-ttl) settings for removing outdated log records. Also you can use the partitioning feature of `MergeTree`-engine tables.
@ -45,5 +45,4 @@ If procfs is supported and enabled on the system, ClickHouse server collects the
- `OSReadBytes`
- `OSWriteBytes`
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/) <!--hide-->

View File

@ -49,7 +49,7 @@ CurrentMetric_ReplicatedChecks: 0
**See also**
- [system.asynchronous\_metrics](asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metrics](metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.

View File

@ -33,7 +33,7 @@ SELECT * FROM system.metrics LIMIT 10
**See Also**
- [system.asynchronous\_metrics](asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metric\_log](metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.

View File

@ -1,5 +1,5 @@
# system.numbers\_mt {#system-numbers-mt}
The same as [system.numbers](numbers.md) but reads are parallelized. The numbers can be returned in any order.
The same as [system.numbers](../../operations/system-tables/numbers.md) but reads are parallelized. The numbers can be returned in any order.
Used for tests.
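A minimal usage sketch:

``` sql
-- Order of the returned numbers is not guaranteed because reads are parallelized.
SELECT number FROM system.numbers_mt LIMIT 10;
```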

View File

@ -11,7 +11,7 @@ You can disable queries logging by setting [log\_queries = 0](../../operations/s
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
ClickHouse doesnt delete data from the table automatically. See [Introduction](index.md#system-tables-introduction) for more details.
ClickHouse doesnt delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details.
The `system.query_log` table registers two kinds of queries:
@ -75,7 +75,7 @@ Columns:
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](events.md#system_tables-events)
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
@ -135,4 +135,4 @@ Settings.Values: ['0','random','1','10000000000']
**See Also**
- [system.query\_thread\_log](query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
- [system.query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.

View File

@ -9,7 +9,7 @@ To start logging:
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query\_thread\_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
ClickHouse doesnt delete data from the table automatically. See [Introduction](index.md#system-tables-introduction) for more details.
ClickHouse doesnt delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for more details.
Columns:
@ -110,4 +110,4 @@ ProfileEvents.Values: [1,97,81,5,81]
**See Also**
- [system.query\_log](query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.
- [system.query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.

View File

@ -1,6 +1,7 @@
---
toc_priority: 103
---
# anyHeavy {#anyheavyx}
Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs in more than half the cases in each of the query’s execution threads, this value is returned. Normally, the result is nondeterministic.
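A minimal usage sketch, with a hypothetical table and columns:

``` sql
-- Returns a frequently occurring status code per domain; the result may vary between runs.
SELECT domain, anyHeavy(http_status) AS frequent_status
FROM access_log
GROUP BY domain;
```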

View File

@ -5,4 +5,4 @@ toc_priority: 104
## anyLast {#anylastx}
Selects the last value encountered.
The result is just as indeterminate as for the [any](any.md) function.
The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
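A minimal usage sketch, with a hypothetical table and columns:

``` sql
-- The "last" value depends on the read order, so the result is indeterminate.
SELECT device_id, anyLast(temperature) AS last_temperature
FROM readings
GROUP BY device_id;
```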

View File

@ -26,7 +26,7 @@ In both cases the type of the returned value is [UInt64](../../../sql-reference/
**Details**
ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count\_distinct\_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](uniqexact.md#agg_function-uniqexact) function.
ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count\_distinct\_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.
The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it.
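A minimal sketch of switching the implementation behind `COUNT(DISTINCT ...)` for a single query (the table and column are hypothetical):

``` sql
-- COUNT(DISTINCT ...) is executed with uniqCombined instead of the default uniqExact.
SELECT count(DISTINCT user_id)
FROM visits
SETTINGS count_distinct_implementation = 'uniqCombined';
```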

View File

@ -21,7 +21,7 @@ If in one query several values are inserted into the same position, the function
- `x` — Value to be inserted. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in one of the [supported data types](../../../sql-reference/data-types/index.md).
- `pos` — Position at which the specified element `x` is to be inserted. Index numbering in the array starts from zero. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
- `default_x`— Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create.md#create-default-values) are used.
- `default_x`— Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create/table.md#create-default-values) are used.
- `size`— Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
**Returned value**

View File

@ -2,11 +2,11 @@
toc_priority: 111
---
# groupUniqArray
# groupUniqArray {#groupuniqarray}
Syntax: `groupUniqArray(x)` or `groupUniqArray(max_size)(x)`
Creates an array from different argument values. Memory consumption is the same as for the [uniqExact](uniqexact.md) function.
Creates an array from different argument values. Memory consumption is the same as for the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md) function.
The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`.
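A minimal usage sketch, with a hypothetical table and columns:

``` sql
-- Collect distinct values per group; the second call caps the array at 3 elements.
SELECT
    user_id,
    groupUniqArray(page) AS pages,
    groupUniqArray(3)(page) AS up_to_three_pages
FROM visits
GROUP BY user_id;
```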

View File

@ -1,7 +1,7 @@
---
toc_folder_title: Reference
toc_priority: 36
toc_title: Reference
toc_folder_title: Reference
---
# Aggregate Function Reference {#aggregate-functions-reference}
@ -24,50 +24,95 @@ Standard aggregate functions:
ClickHouse-specific aggregate functions:
- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
- [topK](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
- [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md)
- [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md)
- [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md)
- [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md)
- [groupBitmapAnd](../../../sql-reference/aggregate-functions/reference/groupbitmapand.md)
- [groupBitmapOr](../../../sql-reference/aggregate-functions/reference/groupbitmapor.md)
- [groupBitmapXor](../../../sql-reference/aggregate-functions/reference/groupbitmapxor.md)
- [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md)
- [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md)
- [minMap](../../../sql-reference/aggregate-functions/reference/minmap.md)
- [maxMap](../../../sql-reference/aggregate-functions/reference/maxmap.md)
- [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md)
- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md)
- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md)
- [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md)
- [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md)
- [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md)
- [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md)
- [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md)
- [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md)
- [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md)
- [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
- [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md)
- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../../../sql-reference/aggregate-functions/reference/categoricalinformationvalue.md)

View File

@ -6,9 +6,9 @@ toc_priority: 200
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](quantileexact.md#quantileexact) function.
This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
@ -62,5 +62,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
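For reference, a minimal usage sketch of `quantile`, with a hypothetical table and column:

``` sql
-- Approximate median and 90th percentile of response times.
SELECT
    quantile(0.5)(response_time) AS median_time,
    quantile(0.9)(response_time) AS p90_time
FROM requests;
```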

View File

@ -6,9 +6,9 @@ toc_priority: 206
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and deterministic algorithm of sampling. The result is deterministic. To get an exact quantile, use the [quantileExact](quantileexact.md#quantileexact) function.
This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and deterministic algorithm of sampling. The result is deterministic. To get an exact quantile, use the [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
@ -63,5 +63,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -8,7 +8,7 @@ Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a num
To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
@ -51,5 +51,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -2,14 +2,13 @@
toc_priority: 203
---
# quantileExactWeighted {#quantileexactweighted}
Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element.
To get exact value, all the passed values are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](quantileexact.md#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1.
To get exact value, all the passed values are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
@ -64,5 +63,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -8,9 +8,9 @@ Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a
The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic.
The performance of the function is lower than performance of [quantile](quantile.md#quantile) or [quantileTiming](quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
The performance of the function is lower than performance of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
@ -53,5 +53,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -6,11 +6,11 @@ toc_priority: 208
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values.
The performance of the function is lower than performance of [quantile](quantile.md#quantile) or [quantileTiming](quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
The performance of the function is lower than performance of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
The result depends on the order of running the query, and is nondeterministic.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
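A minimal hypothetical sketch (the weight expression is arbitrary and only illustrates the second argument):

``` sql
-- each value carries a weight; here the weight simply cycles through 1..5
SELECT quantileTDigestWeighted(0.99)(number, number % 5 + 1) AS weighted_p99
FROM numbers(100000)
```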
**Syntax**
@ -54,5 +54,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -8,7 +8,7 @@ With the determined precision computes the [quantile](https://en.wikipedia.org/w
The result is deterministic (it doesn't depend on the query processing order). The function is optimized for working with sequences which describe distributions such as web page loading times or backend response times.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
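For illustration only (the table `page_views` and its `load_time_ms` column are hypothetical):

``` sql
-- 95th percentile of page load times; values are expected to fit the 0..30000 ms range
SELECT quantileTiming(0.95)(load_time_ms) AS p95_load_time
FROM page_views
```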
**Syntax**
@ -24,8 +24,8 @@ Alias: `medianTiming`.
- `expr` — [Expression](../../../sql-reference/syntax.md#syntax-expressions) over a column values returning a [Float\*](../../../sql-reference/data-types/float.md)-type number.
- If negative values are passed to the function, the behavior is undefined.
- If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
- If negative values are passed to the function, the behavior is undefined.
- If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
**Accuracy**
@ -37,7 +37,7 @@ The calculation is accurate if:
Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
!!! note "Note"
For calculating page loading time quantiles, this function is more effective and accurate than [quantile](quantile.md#quantile).
For calculating page loading time quantiles, this function is more effective and accurate than [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).
**Returned value**
@ -82,5 +82,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -8,7 +8,7 @@ With the determined precision computes the [quantile](https://en.wikipedia.org/w
The result is deterministic (it doesn't depend on the query processing order). The function is optimized for working with sequences which describe distributions such as web page loading times or backend response times.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](quantiles.md#quantiles) function.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
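A hypothetical sketch (the table `requests` and the columns `response_time_ms` and `weight` are assumptions):

``` sql
-- weighted 95th percentile of backend response times
SELECT quantileTimingWeighted(0.95)(response_time_ms, weight) AS p95
FROM requests
```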
**Syntax**
@ -39,7 +39,7 @@ The calculation is accurate if:
Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
!!! note "Note"
For calculating page loading time quantiles, this function is more effective and accurate than [quantile](quantile.md#quantile).
For calculating page loading time quantiles, this function is more effective and accurate than [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).
**Returned value**
@ -81,5 +81,5 @@ Result:
**See Also**
- [median](median.md#median)
- [quantiles](quantiles.md#quantiles)
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -4,7 +4,7 @@ toc_priority: 30
# stddevPop {#stddevpop}
The result is equal to the square root of [varPop](varpop.md).
The result is equal to the square root of [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md).
!!! note "Note"
This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevPopStable` function. It works slower but provides a lower computational error.
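A quick self-contained check of the relation above (synthetic data, no table assumed):

``` sql
-- stddevPop(x) should equal sqrt(varPop(x))
SELECT
    stddevPop(number) AS stddev_pop,
    sqrt(varPop(number)) AS sqrt_var_pop
FROM numbers(10)
```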

View File

@ -4,7 +4,7 @@ toc_priority: 31
# stddevSamp {#stddevsamp}
The result is equal to the square root of [varSamp](varsamp.md).
The result is equal to the square root of [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md).
!!! note "Note"
This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `stddevSampStable` function. It works slower but provides a lower computational error.

View File

@ -71,5 +71,5 @@ The query will return a column of predicted values. Note that first argument of
**See Also**
- [stochasticLogisticRegression](stochasticlogisticregression.md#agg_functions-stochasticlogisticregression)
- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression)
- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
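For context, the usual train/predict flow looks roughly like the sketch below; the tables `train_data` and `test_data` and the column names are hypothetical:

``` sql
-- training: store the model state produced by the -State combinator
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, feat1, feat2) AS state
FROM train_data;

-- prediction: the first argument of evalMLMethod is the trained state
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, feat1, feat2) AS predicted
FROM test_data;
```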

View File

@ -51,5 +51,5 @@ stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
**See Also**
- [stochasticLinearRegression](stochasticlinearregression.md#agg_functions-stochasticlinearregression)
- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression)
- [Difference between linear and logistic regressions.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)

View File

@ -6,7 +6,7 @@ toc_priority: 171
Syntax: `timeSeriesGroupRateSum(uid, ts, val)`
Similarly to [timeSeriesGroupSum](timeseriesgroupsum.md), `timeSeriesGroupRateSum` calculates the rate of time-series and then sum rates together.
Similarly to [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md), `timeSeriesGroupRateSum` calculates the rate of each time series and then sums the rates together.
Also, timestamps should be in ascending order before using this function.
Applying this function to the data from the `timeSeriesGroupSum` example, you get the following result:

View File

@ -34,7 +34,7 @@ We recommend using this function in almost all scenarios.
**See Also**
- [uniqCombined](uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](uniqexact.md#agg_function-uniqexact)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

View File

@ -35,9 +35,9 @@ Function:
- Provides the result deterministically (it doesn't depend on the query processing order).
!!! note "Note"
Since it uses 32-bit hash for non-`String` type, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (error will raise quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](uniqcombined64.md#agg_function-uniqcombined64)
Since it uses 32-bit hash for non-`String` type, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (error will raise quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
Compared to the [uniq](uniq.md#agg_function-uniq) function, the `uniqCombined`:
Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function, the `uniqCombined`:
- Consumes several times less memory.
- Calculates with several times higher accuracy.
@ -45,7 +45,7 @@ Compared to the [uniq](uniq.md#agg_function-uniq) function, the `uniqCombined`:
**See Also**
- [uniq](uniq.md#agg_function-uniq)
- [uniqCombined64](uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](uniqexact.md#agg_function-uniqexact)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
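To compare the estimates in practice, a hypothetical query (the table `visits` and the column `user_id` are assumptions) might look like:

``` sql
-- uniqCombined usually gives a tighter estimate than uniq at a modest memory cost
SELECT
    uniq(user_id) AS approx,
    uniqCombined(user_id) AS combined,
    uniqExact(user_id) AS exact
FROM visits
```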

View File

@ -4,4 +4,4 @@ toc_priority: 193
# uniqCombined64 {#agg_function-uniqcombined64}
Same as [uniqCombined](uniqcombined.md#agg_function-uniqcombined), but uses 64-bit hash for all data types.
Same as [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined), but uses 64-bit hash for all data types.

View File

@ -10,7 +10,7 @@ Calculates the exact number of different argument values.
uniqExact(x[, ...])
```
Use the `uniqExact` function if you absolutely need an exact result. Otherwise use the [uniq](uniq.md#agg_function-uniq) function.
Use the `uniqExact` function if you absolutely need an exact result. Otherwise use the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function.
The `uniqExact` function uses more memory than `uniq`, because the size of the state has unbounded growth as the number of different values increases.
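A hypothetical side-by-side comparison over several argument columns (the table `visits` and the columns `user_id` and `url` are assumptions):

``` sql
-- exact number of distinct (user_id, url) combinations vs. the approximate estimate
SELECT
    uniqExact(user_id, url) AS exact_pairs,
    uniq(user_id, url) AS approx_pairs
FROM visits
```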
@ -20,6 +20,6 @@ The function takes a variable number of parameters. Parameters can be `Tuple`, `
**See Also**
- [uniq](uniq.md#agg_function-uniq)
- [uniqCombined](uniq.md#agg_function-uniqcombined)
- [uniqHLL12](uniq.md#agg_function-uniqhll12)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12)

View File

@ -30,10 +30,10 @@ Function:
- Provides a deterministic result (it doesn't depend on the query processing order).
We dont recommend using this function. In most cases, use the [uniq](uniq.md#agg_function-uniq) or [uniqCombined](uniqcombined.md#agg_function-uniqcombined) function.
We dont recommend using this function. In most cases, use the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) or [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) function.
**See Also**
- [uniq](uniq.md#agg_function-uniq)
- [uniqCombined](uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](uniqexact.md#agg_function-uniqexact)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

View File

@ -5,7 +5,7 @@ toc_title: AggregateFunction
# AggregateFunction {#data-type-aggregatefunction}
Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create.md#create-view). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix.
Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(…)` data type and stored in a table, usually by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
`AggregateFunction(name, types_of_arguments…)` — parametric data type.
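A minimal end-to-end sketch of the `-State` / `-Merge` workflow (the table and view names are hypothetical):

``` sql
-- source table
CREATE TABLE visits (user_id UInt64, dt Date) ENGINE = MergeTree() ORDER BY dt;

-- materialized view that stores AggregateFunction(uniq, UInt64) states
CREATE MATERIALIZED VIEW visits_agg
ENGINE = AggregatingMergeTree() ORDER BY dt
AS SELECT dt, uniqState(user_id) AS users_state
FROM visits
GROUP BY dt;

-- finalize the stored states with the matching -Merge combinator
SELECT dt, uniqMerge(users_state) AS users
FROM visits_agg
GROUP BY dt;
```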

View File

@ -1,6 +1,6 @@
---
toc_priority: 56
toc_folder_title: Domains
toc_priority: 56
toc_title: Overview
---

View File

@ -5,7 +5,7 @@ toc_title: Nested(Name1 Type1, Name2 Type2, ...)
# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2}
A nested data structure is like a table inside a cell. The parameters of a nested data structure the column names and types are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create.md) query. Each table row can correspond to any number of rows in a nested data structure.
A nested data structure is like a table inside a cell. The parameters of a nested data structure the column names and types are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create/table.md) query. Each table row can correspond to any number of rows in a nested data structure.
Example:

View File

@ -39,7 +39,7 @@ The configuration looks like this:
</yandex>
```
Corresponding [DDL-query](../../../sql-reference/statements/create.md#create-dictionary-query):
Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md):
``` sql
CREATE DICTIONARY (...)
@ -330,7 +330,7 @@ LAYOUT(CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576
### complex\_key\_ssd\_cache {#complex-key-ssd-cache}
This type of storage is for use with composite [keys](external-dicts-dict-structure.md). Similar to `ssd\_cache`.
This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `ssd\_cache`.
### direct {#direct}

View File

@ -24,7 +24,7 @@ If dictionary is configured using xml-file, the configuration looks like this:
</yandex>
```
In case of [DDL-query](../../../sql-reference/statements/create.md#create-dictionary-query), equal configuration will looks like:
In the case of a [DDL-query](../../../sql-reference/statements/create/dictionary.md), the equivalent configuration looks like:
``` sql
CREATE DICTIONARY dict_name (...)

View File

@ -29,7 +29,7 @@ If dictionary is configured using xml file, than dictionary configuration has th
</dictionary>
```
Corresponding [DDL-query](../../../sql-reference/statements/create.md#create-dictionary-query) has the following structure:
Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md) has the following structure:
``` sql
CREATE DICTIONARY dict_name

View File

@ -11,7 +11,7 @@ ClickHouse:
- Fully or partially stores dictionaries in RAM.
- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
- Allows to create external dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create.md#create-dictionary-query).
- Allows creating external dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).
The configuration of external dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries\_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.
@ -43,7 +43,7 @@ The dictionary configuration file has the following format:
You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.
[DDL queries for dictionaries](../../../sql-reference/statements/create.md#create-dictionary-query) doesnt require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views.
[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) don't require any additional records in the server configuration. They allow working with dictionaries as first-class entities, like tables or views.
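As a rough illustration of the DDL approach (all names, the source, and the layout below are hypothetical):

``` sql
CREATE DICTIONARY my_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'default' TABLE 'dict_source' USER 'default'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 300);
```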
!!! attention "Attention"
You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries.

View File

@ -0,0 +1,21 @@
---
toc_priority: 32
toc_title: Distributed DDL
---
# Distributed DDL Queries (ON CLUSTER Clause) {#distributed-ddl-queries-on-cluster-clause}
By default the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause.
For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:
``` sql
CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits)
```
In order to run these queries correctly, each host must have the same cluster definition (to simplify syncing configs, you can use substitutions from ZooKeeper). They must also connect to the ZooKeeper servers.
The local version of the query will eventually be executed on each host in the cluster, even if some hosts are currently not available.
!!! warning "Warning"
The order for executing queries within a single host is guaranteed.
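As another illustration, the same clause works with other DDL statements, for example (assuming the table and cluster from the example above):

``` sql
-- drop the table on every host of the cluster
DROP TABLE IF EXISTS all_hits ON CLUSTER cluster;
```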

View File

@ -184,12 +184,12 @@ Checks whether all the elements of array2 appear in array1 in the same exact ord
hasSubstr(array1, array2)
```
In other words, the functions will check whether all the elements of `array2` are contained in `array1` like
In other words, the functions will check whether all the elements of `array2` are contained in `array1` like
the `hasAll` function. In addition, it will check that the elements are observed in the same order in both `array1` and `array2`.
For Example:
- `hasSubstr([1,2,3,4], [2,3])` returns 1. However, `hasSubstr([1,2,3,4], [3,2])` will return `0`.
- `hasSubstr([1,2,3,4], [1,2,3])` returns 1. However, `hasSubstr([1,2,3,4], [1,2,4])` will return `0`.
For Example:
- `hasSubstr([1,2,3,4], [2,3])` returns 1. However, `hasSubstr([1,2,3,4], [3,2])` will return `0`.
- `hasSubstr([1,2,3,4], [1,2,3])` returns 1. However, `hasSubstr([1,2,3,4], [1,2,4])` will return `0`.
**Parameters**
@ -223,7 +223,6 @@ For Example:
`SELECT hasSubstr([[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4]])` returns 1.
## indexOf(arr, x) {#indexofarr-x}
Returns the index of the first element equal to `x` (starting from 1) if it is in the array, or 0 if it is not.
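For illustration:

``` sql
-- indexing is 1-based; a missing element yields 0
SELECT
    indexOf([10, 20, 30], 20) AS found,    -- 2
    indexOf([10, 20, 30], 50) AS missing   -- 0
```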
@ -939,7 +938,7 @@ arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN)
**Returned value**
- Array containing results of the aggregate function over specified ranges.
- Array containing results of the aggregate function over specified ranges.
Type: [Array](../../sql-reference/data-types/array.md).
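A hypothetical sketch; the expected result is an assumption based on the `(index, length)` range format with 1-based indexes:

``` sql
-- sum over elements 1..3 and over elements 2..3
SELECT arrayReduceInRanges('sum', [(1, 3), (2, 2)], [10, 20, 30, 40]) AS sums
-- expected: [60, 50]
```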

View File

@ -161,14 +161,14 @@ toStartOfSecond(value[, timezone])
**Parameters**
- `value` — Date and time. [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).
- `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../../sql-reference/data-types/string.md).
**Returned value**
- Input value without sub-seconds.
- Input value without sub-seconds.
Type: [DateTime64](../data-types/datetime64.md).
Type: [DateTime64](../../sql-reference/data-types/datetime64.md).
**Examples**

View File

@ -281,7 +281,7 @@ h3GetBaseCell(index)
**Returned value**
- Hexagon base cell number.
- Hexagon base cell number.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
@ -317,7 +317,7 @@ h3HexAreaM2(resolution)
**Returned value**
- Area in square meters.
- Area in square meters.
Type: [Float64](../../sql-reference/data-types/float.md).
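For illustration (resolution 10 is an arbitrary choice, and the server is assumed to be built with H3 support):

``` sql
-- average hexagon area at H3 resolution 10, in square meters
SELECT h3HexAreaM2(10) AS area_m2
```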
@ -392,7 +392,7 @@ h3ToChildren(index, resolution)
**Returned values**
- Array of the child H3-indexes.
- Array of the child H3-indexes.
Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)).
@ -429,7 +429,7 @@ h3ToParent(index, resolution)
**Returned value**
- Parent H3 index.
- Parent H3 index.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
@ -463,7 +463,7 @@ h3ToString(index)
**Returned value**
- String representation of the H3 index.
- String representation of the H3 index.
Type: [String](../../sql-reference/data-types/string.md).

View File

@ -799,7 +799,7 @@ Query:
SELECT blockSerializedSize(maxState(1)) as x
```
Result:
Result:
``` text
┌─x─┐
@ -1056,25 +1056,25 @@ Takes state of aggregate function. Returns result of aggregation (finalized stat
## runningAccumulate {#runningaccumulate}
Accumulates states of an aggregate function for each row of a data block.
Accumulates states of an aggregate function for each row of a data block.
!!! warning "Warning"
The state is reset for each new data block.
**Syntax**
```sql
``` sql
runningAccumulate(agg_state[, grouping]);
```
**Parameters**
- `agg_state` — State of the aggregate function. [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction).
- `grouping` — Grouping key. Optional. The state of the function is reset if the `grouping` value is changed. It can be any of the [supported data types](../../sql-reference/data-types/index.md) for which the equality operator is defined.
- `agg_state` — State of the aggregate function. [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction).
- `grouping` — Grouping key. Optional. The state of the function is reset if the `grouping` value is changed. It can be any of the [supported data types](../../sql-reference/data-types/index.md) for which the equality operator is defined.
**Returned value**
- Each resulting row contains a result of the aggregate function, accumulated for all the input rows from 0 to the current position. `runningAccumulate` resets states for each new data block or when the `grouping` value changes.
- Each resulting row contains a result of the aggregate function, accumulated for all the input rows from 0 to the current position. `runningAccumulate` resets states for each new data block or when the `grouping` value changes.
Type depends on the aggregate function used.
@ -1084,13 +1084,13 @@ Consider how you can use `runningAccumulate` to find the cumulative sum of numbe
Query:
```sql
``` sql
SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k) AS sum_k FROM numbers(10) GROUP BY k ORDER BY k);
```
Result:
```text
``` text
┌─k─┬─res─┐
│ 0 │ 0 │
│ 1 │ 1 │
@ -1105,27 +1105,27 @@ Result:
└───┴─────┘
```
The subquery generates `sumState` for every number from `0` to `9`. `sumState` returns the state of the [sum](../aggregate-functions/reference/sum.md) function that contains the sum of a single number.
The subquery generates `sumState` for every number from `0` to `9`. `sumState` returns the state of the [sum](../../sql-reference/aggregate-functions/reference/sum.md) function that contains the sum of a single number.
The whole query does the following:
1. For the first row, `runningAccumulate` takes `sumState(0)` and returns `0`.
2. For the second row, the function merges `sumState(0)` and `sumState(1)` resulting in `sumState(0 + 1)`, and returns `1` as a result.
3. For the third row, the function merges `sumState(0 + 1)` and `sumState(2)` resulting in `sumState(0 + 1 + 2)`, and returns `3` as a result.
4. The actions are repeated until the block ends.
1. For the first row, `runningAccumulate` takes `sumState(0)` and returns `0`.
2. For the second row, the function merges `sumState(0)` and `sumState(1)` resulting in `sumState(0 + 1)`, and returns `1` as a result.
3. For the third row, the function merges `sumState(0 + 1)` and `sumState(2)` resulting in `sumState(0 + 1 + 2)`, and returns `3` as a result.
4. The actions are repeated until the block ends.
The following example shows the `grouping` parameter usage:
Query:
```sql
SELECT
``` sql
SELECT
grouping,
item,
runningAccumulate(state, grouping) AS res
FROM
FROM
(
SELECT
SELECT
toInt8(number / 4) AS grouping,
number AS item,
sumState(number) AS state
@ -1137,7 +1137,7 @@ FROM
Result:
```text
``` text
┌─grouping─┬─item─┬─res─┐
│ 0 │ 0 │ 0 │
│ 0 │ 1 │ 1 │

View File

@ -6,7 +6,7 @@ toc_title: Strings
# Functions for Working with Strings {#functions-for-working-with-strings}
!!! note "Note"
Functions for [searching](string-search-functions.md) and [replacing](string-replace-functions.md) in strings are described separately.
Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [replacing](../../sql-reference/functions/string-replace-functions.md) in strings are described separately.
## empty {#empty}

View File

@ -6,7 +6,7 @@ toc_title: For Replacing in Strings
# Functions for Searching and Replacing in Strings {#functions-for-searching-and-replacing-in-strings}
!!! note "Note"
Functions for [searching](string-search-functions.md) and [other manipulations with strings](string-functions.md) are described separately.
Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [other manipulations with strings](../../sql-reference/functions/string-functions.md) are described separately.
## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement}

View File

@ -8,7 +8,7 @@ toc_title: For Searching in Strings
The search is case-sensitive by default in all these functions. There are separate variants for case insensitive search.
!!! note "Note"
Functions for [replacing](string-replace-functions.md) and [other manipulations with strings](string-functions.md) are described separately.
Functions for [replacing](../../sql-reference/functions/string-replace-functions.md) and [other manipulations with strings](../../sql-reference/functions/string-functions.md) are described separately.
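To illustrate the default case sensitivity mentioned above:

``` sql
-- position is case-sensitive; the *CaseInsensitive variant is not
SELECT
    position('Hello', 'hello') AS cs,                 -- 0 (not found)
    positionCaseInsensitive('Hello', 'hello') AS ci   -- 1
```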
## position(haystack, needle), locate(haystack, needle) {#position}

View File

@ -523,7 +523,7 @@ Result:
## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS}
This function is similar to ['parseDateTimeBestEffort'](#parsedatetimebesteffort), the only difference is that this function prefers US style (`MM/DD/YYYY` etc) in case of ambiguouty.
This function is similar to [parseDateTimeBestEffort](#parsedatetimebesteffort); the only difference is that it prefers the US date style (`MM/DD/YYYY` etc.) in case of ambiguity.
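For illustration (the expected output assumes the US month-first reading of the ambiguous date):

``` sql
-- read as February 10, 2021 rather than October 2, 2021
SELECT parseDateTimeBestEffortUS('02/10/2021') AS dt
```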
**Syntax**

View File

@ -190,7 +190,7 @@ Extracts network locality (`username:password@host:port`) from a URL.
**Syntax**
```sql
``` sql
netloc(URL)
```
@ -200,7 +200,7 @@ netloc(URL)
**Returned value**
- `username:password@host:port`.
- `username:password@host:port`.
Type: `String`.
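A hypothetical example (the URL is arbitrary):

``` sql
-- extracts username:password@host:port from the URL
SELECT netloc('http://paul:wave@example.com:8123/some/path?q=1') AS location
-- expected: 'paul:wave@example.com:8123'
```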

Some files were not shown because too many files have changed in this diff.