Merge branch 'master' of github.com:ClickHouse/ClickHouse into kerberos_tests

This commit is contained in:
Andrey Z 2021-07-07 13:22:53 +03:00
commit 66f1c4807c
711 changed files with 20096 additions and 38578 deletions

3
.gitmodules vendored
View File

@ -168,9 +168,6 @@
[submodule "contrib/fmtlib"]
path = contrib/fmtlib
url = https://github.com/fmtlib/fmt.git
[submodule "contrib/antlr4-runtime"]
path = contrib/antlr4-runtime
url = https://github.com/ClickHouse-Extras/antlr4-runtime.git
[submodule "contrib/sentry-native"]
path = contrib/sentry-native
url = https://github.com/ClickHouse-Extras/sentry-native.git

View File

@ -8,11 +8,8 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-qfort0u8-TWqK4wIP0YSdoDE0btKa1w) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
## Upcoming Events
* [China ClickHouse Community Meetup (online)](http://hdxu.cn/rhbfZ) on 26 June 2021.

View File

@ -119,11 +119,16 @@ private:
}
public:
/// We use Int64 instead of time_t because time_t is mapped to different types (long or long long)
/// on Linux and Darwin (on both of them, long and long long are 64-bit and behave identically,
/// but they are different types in C++, which affects function overload resolution).
using Time = Int64;
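For illustration, a minimal standalone sketch of the overload pitfall this comment describes, assuming an LP64 platform; the overload set mentioned in the comments is hypothetical and not part of this header:

``` cpp
#include <type_traits>

/// 64-bit on both LP64 Linux and Darwin, yet distinct C++ types:
static_assert(sizeof(long) == 8 && sizeof(long long) == 8, "LP64 assumed");
static_assert(!std::is_same_v<long, long long>, "distinct types");

/// Consequence: given a hypothetical overload set
///     void f(long);
///     void f(long long);
/// the call f(std::time_t{}) resolves to the first overload on Linux
/// (where time_t is long) and to the second on Darwin (where it is
/// long long). Pinning the alias to one concrete 64-bit type keeps
/// overload resolution identical across platforms.
```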
/// The order of fields matters for alignment and sizeof.
struct Values
{
/// time_t at beginning of the day.
Int64 date;
/// Time at beginning of the day.
Time date;
/// Properties of the day.
UInt16 year;
@ -182,20 +187,20 @@ private:
LUTIndex years_months_lut[DATE_LUT_YEARS * 12];
/// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time.
time_t offset_at_start_of_epoch;
Time offset_at_start_of_epoch;
/// UTC offset at the beginning of the first supported year.
time_t offset_at_start_of_lut;
Time offset_at_start_of_lut;
bool offset_is_whole_number_of_hours_during_epoch;
/// Time zone name.
std::string time_zone;
inline LUTIndex findIndex(time_t t) const
inline LUTIndex findIndex(Time t) const
{
/// First guess.
Int64 guess = (t / 86400) + daynum_offset_epoch;
Time guess = (t / 86400) + daynum_offset_epoch;
/// For negative time_t the integer division was rounded up, so the guess is offset by one.
/// For negative Time the integer division was rounded up, so the guess is offset by one.
if (unlikely(t < 0))
--guess;
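A standalone sketch of this first-guess adjustment, assuming a zero day-number offset for simplicity (the real code additionally verifies the guess against neighbouring LUT entries):

``` cpp
#include <cassert>
#include <cstdint>

/// C++ integer division truncates toward zero, so for negative t the
/// quotient t / 86400 is rounded up, and the guessed day index has to
/// be moved back by one (except exactly on a day boundary).
int64_t guessDayIndex(int64_t t)
{
    int64_t guess = t / 86400;
    if (t < 0 && t % 86400 != 0)
        --guess;
    return guess;
}

int main()
{
    assert(guessDayIndex(0) == 0);       /// 1970-01-01 00:00:00 UTC
    assert(guessDayIndex(86399) == 0);   /// 1970-01-01 23:59:59 UTC
    assert(guessDayIndex(-1) == -1);     /// 1969-12-31 23:59:59 UTC
    assert(guessDayIndex(-86400) == -1); /// 1969-12-31 00:00:00 UTC
}
```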
@ -227,7 +232,7 @@ private:
return LUTIndex{static_cast<UInt32>(d + daynum_offset_epoch) & date_lut_mask};
}
inline LUTIndex toLUTIndex(time_t t) const
inline LUTIndex toLUTIndex(Time t) const
{
return findIndex(t);
}
@ -280,7 +285,7 @@ public:
/// Round down to start of monday.
template <typename DateOrTime>
inline time_t toFirstDayOfWeek(DateOrTime v) const
inline Time toFirstDayOfWeek(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
return lut[i - (lut[i].day_of_week - 1)].date;
@ -295,7 +300,7 @@ public:
/// Round down to start of month.
template <typename DateOrTime>
inline time_t toFirstDayOfMonth(DateOrTime v) const
inline Time toFirstDayOfMonth(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
return lut[i - (lut[i].day_of_month - 1)].date;
@ -332,13 +337,13 @@ public:
}
template <typename DateOrTime>
inline time_t toFirstDayOfQuarter(DateOrTime v) const
inline Time toFirstDayOfQuarter(DateOrTime v) const
{
return toDate(toFirstDayOfQuarterIndex(v));
}
/// Round down to start of year.
inline time_t toFirstDayOfYear(time_t t) const
inline Time toFirstDayOfYear(Time t) const
{
return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date;
}
@ -355,14 +360,14 @@ public:
return toDayNum(toFirstDayNumOfYearIndex(v));
}
inline time_t toFirstDayOfNextMonth(time_t t) const
inline Time toFirstDayOfNextMonth(Time t) const
{
LUTIndex index = findIndex(t);
index += 32 - lut[index].day_of_month;
return lut[index - (lut[index].day_of_month - 1)].date;
}
inline time_t toFirstDayOfPrevMonth(time_t t) const
inline Time toFirstDayOfPrevMonth(Time t) const
{
LUTIndex index = findIndex(t);
index -= lut[index].day_of_month;
@ -389,16 +394,16 @@ public:
/** Round to start of day, then shift by the specified number of days.
*/
inline time_t toDateAndShift(time_t t, Int32 days) const
inline Time toDateAndShift(Time t, Int32 days) const
{
return lut[findIndex(t) + days].date;
}
inline time_t toTime(time_t t) const
inline Time toTime(Time t) const
{
const LUTIndex index = findIndex(t);
time_t res = t - lut[index].date;
Time res = t - lut[index].date;
if (res >= lut[index].time_at_offset_change())
res += lut[index].amount_of_offset_change();
@ -406,11 +411,11 @@ public:
return res - offset_at_start_of_epoch; /// Starting at 1970-01-01 00:00:00 local time.
}
inline unsigned toHour(time_t t) const
inline unsigned toHour(Time t) const
{
const LUTIndex index = findIndex(t);
time_t time = t - lut[index].date;
Time time = t - lut[index].date;
if (time >= lut[index].time_at_offset_change())
time += lut[index].amount_of_offset_change();
@ -426,7 +431,7 @@ public:
* then subtract the former from the latter to get the offset result.
* Boundaries that meet a DST (daylight saving time) change should be handled very carefully.
*/
inline time_t timezoneOffset(time_t t) const
inline Time timezoneOffset(Time t) const
{
const LUTIndex index = findIndex(t);
@ -434,7 +439,7 @@ public:
/// Because the "amount_of_offset_change" in a LUT entry only exists on the day of the change, it's costly to scan from the very beginning.
/// Instead, we can figure out the accumulated offset from 1970-01-01 to that day simply by taking the difference between lut[].date values,
/// and then directly subtract multiples of 86400 to get the real DST offset (leap seconds are not considered for now).
time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400;
Time res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400;
/// As far as we know, the maximal DST offset can't be more than 2 hours, so after the modulo operation the remainder
/// will sit within [-offset --> 0 --> offset], which respectively corresponds to moving the clock forward or backward.
@ -448,7 +453,7 @@ public:
}
inline unsigned toSecond(time_t t) const
inline unsigned toSecond(Time t) const
{
auto res = t % 60;
if (likely(res >= 0))
@ -456,7 +461,7 @@ public:
return res + 60;
}
inline unsigned toMinute(time_t t) const
inline unsigned toMinute(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return (t / 60) % 60;
@ -474,27 +479,27 @@ public:
}
/// NOTE: Assuming timezone offset is a multiple of 15 minutes.
inline time_t toStartOfMinute(time_t t) const { return roundDown(t, 60); }
inline time_t toStartOfFiveMinute(time_t t) const { return roundDown(t, 300); }
inline time_t toStartOfFifteenMinutes(time_t t) const { return roundDown(t, 900); }
inline Time toStartOfMinute(Time t) const { return roundDown(t, 60); }
inline Time toStartOfFiveMinute(Time t) const { return roundDown(t, 300); }
inline Time toStartOfFifteenMinutes(Time t) const { return roundDown(t, 900); }
inline time_t toStartOfTenMinutes(time_t t) const
inline Time toStartOfTenMinutes(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 600 * 600;
/// More complex logic is needed for Nepal, which has a 05:45 offset. Australia/Eucla is also unfortunate.
Int64 date = find(t).date;
Time date = find(t).date;
return date + (t - date) / 600 * 600;
}
/// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception.
inline time_t toStartOfHour(time_t t) const
inline Time toStartOfHour(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600 * 3600;
Int64 date = find(t).date;
Time date = find(t).date;
return date + (t - date) / 3600 * 3600;
}
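A self-contained sketch of the day-relative rounding used by both functions above, with `start_of_day` standing in for `find(t).date`; this is what keeps zones with non-whole-hour offsets, such as Nepal's UTC+05:45, on wall-clock boundaries:

``` cpp
#include <cassert>
#include <cstdint>

/// Rounding the raw Unix timestamp down to a multiple of 3600 would not
/// land on a local wall-clock hour in a UTC+05:45 zone; rounding the
/// seconds elapsed since the local start of day does.
int64_t toStartOfHourSketch(int64_t t, int64_t start_of_day)
{
    return start_of_day + (t - start_of_day) / 3600 * 3600;
}

int main()
{
    /// In a UTC+05:45 zone, the local day 1970-01-01 begins at Unix -20700.
    /// Unix 0 is 05:45 local; the start of that local hour (05:00) is
    /// Unix -2700, whereas naive rounding of the raw timestamp would give 0.
    assert(toStartOfHourSketch(0, -20700) == -2700);
}
```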
@ -506,11 +511,11 @@ public:
* because the same calendar day starts/ends at different timestamps in different time zones)
*/
inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; }
inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; }
inline Time fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; }
inline Time fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; }
template <typename DateOrTime>
inline time_t toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; }
inline Time toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; }
template <typename DateOrTime>
inline unsigned toMonth(DateOrTime v) const { return lut[toLUTIndex(v)].month; }
@ -578,7 +583,7 @@ public:
return toDayNum(toFirstDayNumOfISOYearIndex(v));
}
inline time_t toFirstDayOfISOYear(time_t t) const
inline Time toFirstDayOfISOYear(Time t) const
{
return lut[toFirstDayNumOfISOYearIndex(t)].date;
}
@ -773,7 +778,7 @@ public:
}
/// We count all hour-length intervals, unrelated to offset changes.
inline time_t toRelativeHourNum(time_t t) const
inline Time toRelativeHourNum(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600;
@ -784,18 +789,18 @@ public:
}
template <typename DateOrTime>
inline time_t toRelativeHourNum(DateOrTime v) const
inline Time toRelativeHourNum(DateOrTime v) const
{
return toRelativeHourNum(lut[toLUTIndex(v)].date);
}
inline time_t toRelativeMinuteNum(time_t t) const
inline Time toRelativeMinuteNum(Time t) const
{
return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60);
}
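A standalone sketch of why the `DATE_LUT_ADD` shift above works; the `SHIFT` constant below is a hypothetical stand-in, assuming (as for `DATE_LUT_ADD`) a positive multiple of 60 large enough to keep the dividend non-negative:

``` cpp
#include <cassert>
#include <cstdint>

constexpr int64_t SHIFT = 86400LL * 1000000; /// stand-in for DATE_LUT_ADD

/// C++ division truncates toward zero, so plain t / 60 rounds up for
/// negative t. Shifting into the non-negative range first makes the
/// division a true floor, and subtracting the shifted quotient back
/// yields floor(t / 60) for any t in the supported range.
int64_t toRelativeMinuteNumSketch(int64_t t)
{
    return (t + SHIFT) / 60 - (SHIFT / 60);
}

int main()
{
    assert(toRelativeMinuteNumSketch(59) == 0);
    assert(toRelativeMinuteNumSketch(-1) == -1);  /// floor(-1 / 60)
    assert(toRelativeMinuteNumSketch(-60) == -1); /// floor(-60 / 60)
    assert(toRelativeMinuteNumSketch(-61) == -2); /// floor(-61 / 60)
}
```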
template <typename DateOrTime>
inline time_t toRelativeMinuteNum(DateOrTime v) const
inline Time toRelativeMinuteNum(DateOrTime v) const
{
return toRelativeMinuteNum(lut[toLUTIndex(v)].date);
}
@ -842,14 +847,14 @@ public:
return ExtendedDayNum(4 + (d - 4) / days * days);
}
inline time_t toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const
inline Time toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const
{
if (days == 1)
return toDate(d);
return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date;
}
inline time_t toStartOfHourInterval(time_t t, UInt64 hours) const
inline Time toStartOfHourInterval(Time t, UInt64 hours) const
{
if (hours == 1)
return toStartOfHour(t);
@ -867,7 +872,7 @@ public:
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
{
/// Align to new hour numbers before rounding.
@ -892,7 +897,7 @@ public:
return values.date + time;
}
inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const
inline Time toStartOfMinuteInterval(Time t, UInt64 minutes) const
{
if (minutes == 1)
return toStartOfMinute(t);
@ -909,7 +914,7 @@ public:
return roundDown(t, seconds);
}
inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const
inline Time toStartOfSecondInterval(Time t, UInt64 seconds) const
{
if (seconds == 1)
return t;
@ -934,14 +939,14 @@ public:
return toDayNum(makeLUTIndex(year, month, day_of_month));
}
inline time_t makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const
inline Time makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const
{
return lut[makeLUTIndex(year, month, day_of_month)].date;
}
/** Does not accept daylight saving time as argument: in case of ambiguity, it chooses the greater timestamp.
*/
inline time_t makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
inline Time makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
{
size_t index = makeLUTIndex(year, month, day_of_month);
UInt32 time_offset = hour * 3600 + minute * 60 + second;
@ -969,7 +974,7 @@ public:
return values.year * 10000 + values.month * 100 + values.day_of_month;
}
inline time_t YYYYMMDDToDate(UInt32 num) const
inline Time YYYYMMDDToDate(UInt32 num) const
{
return makeDate(num / 10000, num / 100 % 100, num % 100);
}
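A quick standalone check of the digit-group decoding used above:

``` cpp
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t num = 20210707;      /// YYYYMMDD
    assert(num / 10000 == 2021);  /// year
    assert(num / 100 % 100 == 7); /// month
    assert(num % 100 == 7);       /// day of month
}
```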
@ -1000,13 +1005,13 @@ public:
TimeComponents time;
};
inline DateComponents toDateComponents(time_t t) const
inline DateComponents toDateComponents(Time t) const
{
const Values & values = getValues(t);
return { values.year, values.month, values.day_of_month };
}
inline DateTimeComponents toDateTimeComponents(time_t t) const
inline DateTimeComponents toDateTimeComponents(Time t) const
{
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
@ -1017,7 +1022,7 @@ public:
res.date.month = values.month;
res.date.day = values.day_of_month;
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1042,7 +1047,7 @@ public:
}
inline UInt64 toNumYYYYMMDDhhmmss(time_t t) const
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
{
DateTimeComponents components = toDateTimeComponents(t);
@ -1055,7 +1060,7 @@ public:
+ UInt64(components.date.year) * 10000000000;
}
inline time_t YYYYMMDDhhmmssToTime(UInt64 num) const
inline Time YYYYMMDDhhmmssToTime(UInt64 num) const
{
return makeDateTime(
num / 10000000000,
@ -1069,12 +1074,12 @@ public:
/// Adding calendar intervals.
/// Implementation specific behaviour when delta is too big.
inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED Time addDays(Time t, Int64 delta) const
{
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1086,7 +1091,7 @@ public:
return lut[new_index].date + time;
}
inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const
{
return addDays(t, delta * 7);
}
@ -1131,14 +1136,14 @@ public:
/// If the resulting month has fewer days than the source month, saturation can happen.
/// Example: 31 Aug + 1 month = 30 Sep.
inline time_t NO_SANITIZE_UNDEFINED addMonths(time_t t, Int64 delta) const
inline Time NO_SANITIZE_UNDEFINED addMonths(Time t, Int64 delta) const
{
const auto result_day = addMonthsIndex(t, delta);
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1153,7 +1158,7 @@ public:
return toDayNum(addMonthsIndex(d, delta));
}
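A self-contained sketch of the saturation rule described above ("31 Aug + 1 month = 30 Sep"), ignoring time zones and the LUT, and assuming a non-negative resulting month index:

``` cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

struct YMD { int year; int month; int day; };

int daysInMonth(int year, int month)
{
    static const int days[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
    bool leap = (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
    return (month == 2 && leap) ? 29 : days[month - 1];
}

YMD addMonthsSaturating(YMD d, int64_t delta)
{
    int64_t months = d.year * 12 + (d.month - 1) + delta;
    d.year = static_cast<int>(months / 12);
    d.month = static_cast<int>(months % 12) + 1;
    d.day = std::min(d.day, daysInMonth(d.year, d.month)); /// saturate
    return d;
}

int main()
{
    YMD r = addMonthsSaturating({2021, 8, 31}, 1);
    assert(r.year == 2021 && r.month == 9 && r.day == 30);
}
```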
inline time_t NO_SANITIZE_UNDEFINED addQuarters(time_t t, Int64 delta) const
inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int64 delta) const
{
return addMonths(t, delta * 3);
}
@ -1180,14 +1185,14 @@ public:
}
/// Saturation can occur if 29 Feb is mapped to a non-leap year.
inline time_t addYears(time_t t, Int64 delta) const
inline Time addYears(Time t, Int64 delta) const
{
auto result_day = addYearsIndex(t, delta);
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1203,7 +1208,7 @@ public:
}
inline std::string timeToString(time_t t) const
inline std::string timeToString(Time t) const
{
DateTimeComponents components = toDateTimeComponents(t);
@ -1228,7 +1233,7 @@ public:
return s;
}
inline std::string dateToString(time_t t) const
inline std::string dateToString(Time t) const
{
const Values & values = getValues(t);

View File

@ -0,0 +1,41 @@
#include <functional>
/** Adapts a functor to a static method where the functor is passed as context.
* The main use case is converting a lambda into a function that can be passed into JIT code.
*/
template <typename Functor>
class FunctorToStaticMethodAdaptor : public FunctorToStaticMethodAdaptor<decltype(&Functor::operator())>
{
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...) const>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = reinterpret_cast<C*>(ptr);
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...)>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = reinterpret_cast<C*>(ptr); /// static_cast from char * to an unrelated C * is ill-formed; reinterpret_cast matches the const overload above.
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
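A hypothetical usage sketch: JIT-compiled code typically receives a plain function pointer plus an opaque context pointer, which is exactly the shape the adaptor provides:

``` cpp
#include <iostream>

int main()
{
    int captured = 41;
    auto lambda = [captured](int x) { return captured + x; };

    /// decltype(lambda) goes through the primary template to the
    /// const-qualified operator() specialization above.
    using Adaptor = FunctorToStaticMethodAdaptor<decltype(lambda)>;

    /// The pair handed to JIT code: a static entry point plus the
    /// functor's address as an opaque context pointer.
    char * context = reinterpret_cast<char *>(&lambda);
    std::cout << Adaptor::unsafeCall(context, 1) << '\n'; /// prints 42
}
```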

View File

@ -1,8 +1,9 @@
#include <common/ReplxxLineReader.h>
#include <common/errnoToString.h>
#include <errno.h>
#include <string.h>
#include <chrono>
#include <cerrno>
#include <cstring>
#include <unistd.h>
#include <functional>
#include <sys/file.h>
@ -24,6 +25,94 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}
/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
/// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com)
std::string replxx_now_ms_str()
{
std::chrono::milliseconds ms(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()));
time_t t = ms.count() / 1000;
tm broken;
if (!localtime_r(&t, &broken))
{
return std::string();
}
static int const BUFF_SIZE(32);
char str[BUFF_SIZE];
strftime(str, BUFF_SIZE, "%Y-%m-%d %H:%M:%S.", &broken);
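/// sizeof("YYYY-mm-dd HH:MM:SS") counts the trailing NUL, so the offset lands just past the '.' written by strftime, where the milliseconds are appended.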
snprintf(str + sizeof("YYYY-mm-dd HH:MM:SS"), 5, "%03d", static_cast<int>(ms.count() % 1000));
return str;
}
/// Convert from readline to replxx format.
///
/// replxx requires each history line to be prepended with a time line:
///
/// ### YYYY-MM-DD HH:MM:SS.SSS
/// select 1
///
/// Without those service lines it will load all lines from the history file as
/// one history line for suggestion. And if there are lots of lines in the file, it
/// will take lots of time (getline() + tons of reallocations).
///
/// NOTE: this code uses std::ifstream/std::ofstream like original replxx code.
void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
{
std::ifstream in(path);
if (!in)
{
rx.print("Cannot open %s reading (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
std::string line;
if (!getline(in, line).good())
{
rx.print("Cannot read from %s (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
/// This is the date marker: the file is already in the new format, no need to convert.
static char const REPLXX_TIMESTAMP_PATTERN[] = "### dddd-dd-dd dd:dd:dd.ddd";
if (line.starts_with("### ") && line.size() == strlen(REPLXX_TIMESTAMP_PATTERN))
{
return;
}
std::vector<std::string> lines;
in.seekg(0);
while (getline(in, line).good())
{
lines.push_back(line);
}
in.close();
size_t lines_size = lines.size();
std::sort(lines.begin(), lines.end());
lines.erase(std::unique(lines.begin(), lines.end()), lines.end());
rx.print("The history file (%s) is in old format. %zu lines, %zu unique lines.\n",
path.c_str(), lines_size, lines.size());
std::ofstream out(path);
if (!out)
{
rx.print("Cannot open %s for writing (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
const std::string & timestamp = replxx_now_ms_str();
for (const auto & out_line : lines)
{
out << "### " << timestamp << "\n" << out_line << std::endl;
}
out.close();
}
}
ReplxxLineReader::ReplxxLineReader(
@ -47,6 +136,8 @@ ReplxxLineReader::ReplxxLineReader(
}
else
{
convertHistoryFile(history_file_path, rx);
if (flock(history_file_fd, LOCK_SH))
{
rx.print("Shared lock of history file failed: %s\n", errnoToString(errno).c_str());

View File

@ -1,4 +1,7 @@
# This strings autochanged from release_lib.sh:
# These variables are autochanged by release_lib.sh:
# NOTE: VERSION_REVISION has nothing in common with DBMS_TCP_PROTOCOL_VERSION;
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54453)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 8)

View File

@ -34,7 +34,6 @@ endif()
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
add_subdirectory (abseil-cpp-cmake)
add_subdirectory (antlr4-runtime-cmake)
add_subdirectory (boost-cmake)
add_subdirectory (cctz-cmake)
add_subdirectory (consistent-hashing)

@ -1 +0,0 @@
Subproject commit 672643e9a427ef803abf13bc8cb4989606553d64

View File

@ -1,156 +0,0 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime")
set (SRCS
"${LIBRARY_DIR}/ANTLRErrorListener.cpp"
"${LIBRARY_DIR}/ANTLRErrorStrategy.cpp"
"${LIBRARY_DIR}/ANTLRFileStream.cpp"
"${LIBRARY_DIR}/ANTLRInputStream.cpp"
"${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp"
"${LIBRARY_DIR}/atn/ActionTransition.cpp"
"${LIBRARY_DIR}/atn/AmbiguityInfo.cpp"
"${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp"
"${LIBRARY_DIR}/atn/ATN.cpp"
"${LIBRARY_DIR}/atn/ATNConfig.cpp"
"${LIBRARY_DIR}/atn/ATNConfigSet.cpp"
"${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp"
"${LIBRARY_DIR}/atn/ATNDeserializer.cpp"
"${LIBRARY_DIR}/atn/ATNSerializer.cpp"
"${LIBRARY_DIR}/atn/ATNSimulator.cpp"
"${LIBRARY_DIR}/atn/ATNState.cpp"
"${LIBRARY_DIR}/atn/AtomTransition.cpp"
"${LIBRARY_DIR}/atn/BasicBlockStartState.cpp"
"${LIBRARY_DIR}/atn/BasicState.cpp"
"${LIBRARY_DIR}/atn/BlockEndState.cpp"
"${LIBRARY_DIR}/atn/BlockStartState.cpp"
"${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionEventInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionState.cpp"
"${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp"
"${LIBRARY_DIR}/atn/EpsilonTransition.cpp"
"${LIBRARY_DIR}/atn/ErrorInfo.cpp"
"${LIBRARY_DIR}/atn/LexerAction.cpp"
"${LIBRARY_DIR}/atn/LexerActionExecutor.cpp"
"${LIBRARY_DIR}/atn/LexerATNConfig.cpp"
"${LIBRARY_DIR}/atn/LexerATNSimulator.cpp"
"${LIBRARY_DIR}/atn/LexerChannelAction.cpp"
"${LIBRARY_DIR}/atn/LexerCustomAction.cpp"
"${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp"
"${LIBRARY_DIR}/atn/LexerModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerMoreAction.cpp"
"${LIBRARY_DIR}/atn/LexerPopModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerPushModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerSkipAction.cpp"
"${LIBRARY_DIR}/atn/LexerTypeAction.cpp"
"${LIBRARY_DIR}/atn/LL1Analyzer.cpp"
"${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp"
"${LIBRARY_DIR}/atn/LoopEndState.cpp"
"${LIBRARY_DIR}/atn/NotSetTransition.cpp"
"${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp"
"${LIBRARY_DIR}/atn/ParseInfo.cpp"
"${LIBRARY_DIR}/atn/ParserATNSimulator.cpp"
"${LIBRARY_DIR}/atn/PlusBlockStartState.cpp"
"${LIBRARY_DIR}/atn/PlusLoopbackState.cpp"
"${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp"
"${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp"
"${LIBRARY_DIR}/atn/PredicateTransition.cpp"
"${LIBRARY_DIR}/atn/PredictionContext.cpp"
"${LIBRARY_DIR}/atn/PredictionMode.cpp"
"${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp"
"${LIBRARY_DIR}/atn/RangeTransition.cpp"
"${LIBRARY_DIR}/atn/RuleStartState.cpp"
"${LIBRARY_DIR}/atn/RuleStopState.cpp"
"${LIBRARY_DIR}/atn/RuleTransition.cpp"
"${LIBRARY_DIR}/atn/SemanticContext.cpp"
"${LIBRARY_DIR}/atn/SetTransition.cpp"
"${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp"
"${LIBRARY_DIR}/atn/StarBlockStartState.cpp"
"${LIBRARY_DIR}/atn/StarLoopbackState.cpp"
"${LIBRARY_DIR}/atn/StarLoopEntryState.cpp"
"${LIBRARY_DIR}/atn/TokensStartState.cpp"
"${LIBRARY_DIR}/atn/Transition.cpp"
"${LIBRARY_DIR}/atn/WildcardTransition.cpp"
"${LIBRARY_DIR}/BailErrorStrategy.cpp"
"${LIBRARY_DIR}/BaseErrorListener.cpp"
"${LIBRARY_DIR}/BufferedTokenStream.cpp"
"${LIBRARY_DIR}/CharStream.cpp"
"${LIBRARY_DIR}/CommonToken.cpp"
"${LIBRARY_DIR}/CommonTokenFactory.cpp"
"${LIBRARY_DIR}/CommonTokenStream.cpp"
"${LIBRARY_DIR}/ConsoleErrorListener.cpp"
"${LIBRARY_DIR}/DefaultErrorStrategy.cpp"
"${LIBRARY_DIR}/dfa/DFA.cpp"
"${LIBRARY_DIR}/dfa/DFASerializer.cpp"
"${LIBRARY_DIR}/dfa/DFAState.cpp"
"${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp"
"${LIBRARY_DIR}/DiagnosticErrorListener.cpp"
"${LIBRARY_DIR}/Exceptions.cpp"
"${LIBRARY_DIR}/FailedPredicateException.cpp"
"${LIBRARY_DIR}/InputMismatchException.cpp"
"${LIBRARY_DIR}/InterpreterRuleContext.cpp"
"${LIBRARY_DIR}/IntStream.cpp"
"${LIBRARY_DIR}/Lexer.cpp"
"${LIBRARY_DIR}/LexerInterpreter.cpp"
"${LIBRARY_DIR}/LexerNoViableAltException.cpp"
"${LIBRARY_DIR}/ListTokenSource.cpp"
"${LIBRARY_DIR}/misc/InterpreterDataReader.cpp"
"${LIBRARY_DIR}/misc/Interval.cpp"
"${LIBRARY_DIR}/misc/IntervalSet.cpp"
"${LIBRARY_DIR}/misc/MurmurHash.cpp"
"${LIBRARY_DIR}/misc/Predicate.cpp"
"${LIBRARY_DIR}/NoViableAltException.cpp"
"${LIBRARY_DIR}/Parser.cpp"
"${LIBRARY_DIR}/ParserInterpreter.cpp"
"${LIBRARY_DIR}/ParserRuleContext.cpp"
"${LIBRARY_DIR}/ProxyErrorListener.cpp"
"${LIBRARY_DIR}/RecognitionException.cpp"
"${LIBRARY_DIR}/Recognizer.cpp"
"${LIBRARY_DIR}/RuleContext.cpp"
"${LIBRARY_DIR}/RuleContextWithAltNum.cpp"
"${LIBRARY_DIR}/RuntimeMetaData.cpp"
"${LIBRARY_DIR}/support/Any.cpp"
"${LIBRARY_DIR}/support/Arrays.cpp"
"${LIBRARY_DIR}/support/CPPUtils.cpp"
"${LIBRARY_DIR}/support/guid.cpp"
"${LIBRARY_DIR}/support/StringUtils.cpp"
"${LIBRARY_DIR}/Token.cpp"
"${LIBRARY_DIR}/TokenSource.cpp"
"${LIBRARY_DIR}/TokenStream.cpp"
"${LIBRARY_DIR}/TokenStreamRewriter.cpp"
"${LIBRARY_DIR}/tree/ErrorNode.cpp"
"${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp"
"${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp"
"${LIBRARY_DIR}/tree/ParseTree.cpp"
"${LIBRARY_DIR}/tree/ParseTreeListener.cpp"
"${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp"
"${LIBRARY_DIR}/tree/ParseTreeWalker.cpp"
"${LIBRARY_DIR}/tree/pattern/Chunk.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp"
"${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp"
"${LIBRARY_DIR}/tree/pattern/TagChunk.cpp"
"${LIBRARY_DIR}/tree/pattern/TextChunk.cpp"
"${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp"
"${LIBRARY_DIR}/tree/TerminalNode.cpp"
"${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp"
"${LIBRARY_DIR}/tree/Trees.cpp"
"${LIBRARY_DIR}/tree/xpath/XPath.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp"
"${LIBRARY_DIR}/UnbufferedCharStream.cpp"
"${LIBRARY_DIR}/UnbufferedTokenStream.cpp"
"${LIBRARY_DIR}/Vocabulary.cpp"
"${LIBRARY_DIR}/WritableToken.cpp"
)
add_library (antlr4-runtime ${SRCS})
target_include_directories (antlr4-runtime SYSTEM PUBLIC ${LIBRARY_DIR})

2
contrib/libpq vendored

@ -1 +1 @@
Subproject commit c7624588ddd84f153dd5990e81b886e4568bddde
Subproject commit e071ea570f8985aa00e34f5b9d50a3cfe666327e

View File

@ -8,7 +8,7 @@ set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol2.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
@ -18,8 +18,12 @@ set(SRCS
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${LIBPQ_SOURCE_DIR}/common/scram-common.c"
"${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/sha2.c"
"${LIBPQ_SOURCE_DIR}/common/sha1.c"
"${LIBPQ_SOURCE_DIR}/common/md5.c"
"${LIBPQ_SOURCE_DIR}/common/md5_common.c"
"${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/cryptohash.c"
"${LIBPQ_SOURCE_DIR}/common/saslprep.c"
"${LIBPQ_SOURCE_DIR}/common/unicode_norm.c"
"${LIBPQ_SOURCE_DIR}/common/ip.c"

2
contrib/libunwind vendored

@ -1 +1 @@
Subproject commit a491c27b33109a842d577c0f7ac5f5f218859181
Subproject commit 6b816d2fba3991f8fd6aaec17d92f68947eab667

View File

@ -43,29 +43,6 @@ command -v flock >/dev/null && FLOCK=flock
# Override defaults from optional config file
test -f /etc/default/clickhouse && . /etc/default/clickhouse
# On x86_64, check for required instruction set.
if uname -mpi | grep -q 'x86_64'; then
if ! grep -q 'sse4_2' /proc/cpuinfo; then
# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check.
if ! grep -q 'Common KVM processor' /proc/cpuinfo; then
# Some other VMs also report wrong flags in cpuinfo.
# Tricky way to test for instruction set:
# create temporary binary and run it;
# if it get caught illegal instruction signal,
# then required instruction set is not supported really.
#
# Generated this way:
# gcc -xc -Os -static -nostdlib - <<< 'void _start() { __asm__("pcmpgtq %%xmm0, %%xmm1; mov $0x3c, %%rax; xor %%rdi, %%rdi; syscall":::"memory"); }' && strip -R .note.gnu.build-id -R .comment -R .eh_frame -s ./a.out && gzip -c -9 ./a.out | base64 -w0; echo
if ! (echo -n 'H4sICAwAW1cCA2Eub3V0AKt39XFjYmRkgAEmBjsGEI+H0QHMd4CKGyCUAMUsGJiBJDNQNUiYlQEZOKDQclB9cnD9CmCSBYqJBRxQOvBpSQobGfqIAWn8FuYnPI4fsAGyPQz/87MeZtArziguKSpJTGLQK0mtKGGgGHADMSgoYH6AhTMPNHyE0NQzYuEzYzEXFr6CBPQDANAsXKTwAQAA' | base64 -d | gzip -d > /tmp/clickhouse_test_sse42 && chmod a+x /tmp/clickhouse_test_sse42 && /tmp/clickhouse_test_sse42); then
echo 'Warning! SSE 4.2 instruction set is not supported'
#exit 3
fi
fi
fi
fi
die()
{
@ -116,7 +93,7 @@ forcestop()
service_or_func()
{
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
service $PROGRAM $1
systemctl $1 $PROGRAM
else
$1
fi

View File

@ -12,7 +12,6 @@ mkdir root
pushd root
mkdir lib lib64 etc tmp root
cp ${BUILD_DIR}/programs/clickhouse .
cp ${SRC_DIR}/programs/server/{config,users}.xml .
cp /lib/x86_64-linux-gnu/{libc.so.6,libdl.so.2,libm.so.6,libpthread.so.0,librt.so.1,libnss_dns.so.2,libresolv.so.2} lib
cp /lib64/ld-linux-x86-64.so.2 lib64
cp /etc/resolv.conf ./etc

View File

@ -46,6 +46,7 @@ RUN apt-get update \
pigz \
pkg-config \
tzdata \
pv \
--yes --no-install-recommends
# Sanitizer options for services (clickhouse-server)

View File

@ -160,7 +160,6 @@ function clone_submodules
SUBMODULES_TO_UPDATE=(
contrib/abseil-cpp
contrib/antlr4-runtime
contrib/boost
contrib/zlib-ng
contrib/libxml2
@ -374,14 +373,11 @@ function run_tests
# Depends on AWS
01801_s3_cluster
# Depends on LLVM JIT
01072_nullable_jit
01852_jit_if
01865_jit_comparison_constant_result
01871_merge_tree_compile_expressions
# needs psql
01889_postgresql_protocol_null_fields
# needs pv
01923_network_receive_time_metric_insert
)
time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \

View File

@ -1,6 +1,8 @@
# docker build -t yandex/clickhouse-integration-test .
FROM yandex/clickhouse-test-base
SHELL ["/bin/bash", "-c"]
RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
tzdata \
@ -20,7 +22,9 @@ RUN apt-get update \
krb5-user \
iproute2 \
lsof \
g++
g++ \
default-jre
RUN rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
@ -30,6 +34,19 @@ RUN apt-get clean
# Install MySQL ODBC driver
RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
# Unfortunately this is required by a single test that converts data from ZooKeeper to clickhouse-keeper.
# ZooKeeper is not started by default, but consumes some space in containers.
# 777 perms used to allow anybody to start/stop ZooKeeper
ENV ZOOKEEPER_VERSION='3.6.3'
RUN curl -O "https://mirrors.estointernet.in/apache/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz
RUN echo $'tickTime=2500 \n\
dataDir=/zookeeper \n\
clientPort=2181 \n\
maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
RUN mkdir /zookeeper && chmod -R 777 /zookeeper
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

View File

@ -11,6 +11,7 @@ services:
interval: 10s
timeout: 5s
retries: 5
command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"]
networks:
default:
aliases:
@ -22,4 +23,4 @@ services:
volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES_DIR:-}
target: /postgres/
target: /postgres/

View File

@ -319,14 +319,14 @@ function get_profiles
wait
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
@ -409,10 +409,10 @@ create view right_query_log as select *
'$(cat "right-query-log.tsv.columns")');
create view query_logs as
select 0 version, query_id, ProfileEvents.keys, ProfileEvents.values,
select 0 version, query_id, ProfileEvents,
query_duration_ms, memory_usage from left_query_log
union all
select 1 version, query_id, ProfileEvents.keys, ProfileEvents.values,
select 1 version, query_id, ProfileEvents,
query_duration_ms, memory_usage from right_query_log
;
@ -424,7 +424,7 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
with (
-- sumMapState with the list of all keys with '-0.' values. Negative zero is because
-- sumMap removes keys with positive zeros.
with (select groupUniqArrayArray(ProfileEvents.keys) from query_logs) as all_names
with (select groupUniqArrayArray(mapKeys(ProfileEvents)) from query_logs) as all_names
select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
) as all_metrics
select test, query_index, version, query_id,
@ -433,8 +433,8 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
[
all_metrics,
arrayReduce('sumMapState',
[(ProfileEvents.keys,
arrayMap(x->toFloat64(x), ProfileEvents.values))]
[(mapKeys(ProfileEvents),
arrayMap(x->toFloat64(x), mapValues(ProfileEvents)))]
),
arrayReduce('sumMapState', [(
['client_time', 'server_time', 'memory_usage'],
@ -1003,10 +1003,11 @@ create view query_log as select *
create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
'unstable-run-metrics.$version.rep') as
select
test, query_index, query_id,
ProfileEvents.values value, ProfileEvents.keys metric
from query_log array join ProfileEvents
select test, query_index, query_id, value, metric
from query_log
array join
mapValues(ProfileEvents) as value,
mapKeys(ProfileEvents) as metric
join unstable_query_runs using (query_id)
;
@ -1177,11 +1178,11 @@ create view right_async_metric_log as
-- Use the right log as time reference because it may have higher precision.
create table metrics engine File(TSV, 'metrics/metrics.tsv') as
with (select min(event_time) from right_async_metric_log) as min_time
select name metric, r.event_time - min_time event_time, l.value as left, r.value as right
select metric, r.event_time - min_time event_time, l.value as left, r.value as right
from right_async_metric_log r
asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
'$(cat left-async-metric-log.tsv.columns)') l
on l.name = r.name and r.event_time <= l.event_time
on l.metric = r.metric and r.event_time <= l.event_time
order by metric, event_time
;

View File

@ -23,6 +23,7 @@
<!-- disable jit for perf tests -->
<compile_expressions>0</compile_expressions>
<compile_aggregate_expressions>0</compile_aggregate_expressions>
</default>
</profiles>
<users>

View File

@ -35,7 +35,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then
# simplest way to forward env variables to the server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
else
service clickhouse-server start
sudo clickhouse start
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@ -1,4 +1,6 @@
#!/bin/bash
# shellcheck disable=SC2094
# shellcheck disable=SC2086
set -x
@ -37,6 +39,17 @@ function stop()
function start()
{
# Rename existing log file - it will be more convenient to read separate files for separate server runs.
if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ]
then
log_file_counter=1
while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ]
do
log_file_counter=$((log_file_counter + 1))
done
mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}"
fi
counter=0
until clickhouse-client --query "SELECT 1"
do
@ -141,7 +154,11 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
&& echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv
# Put logs into /test_output/
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz
for log_file in /var/log/clickhouse-server/clickhouse-server.log*
do
pigz < "${log_file}" > /test_output/"$(basename ${log_file})".gz
done
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:

View File

@ -0,0 +1,6 @@
# The ARM (AArch64) build works on Amazon Graviton, Oracle Cloud, and Huawei Cloud ARM machines.
# Support for AArch64 is pre-production ready.
wget 'https://builds.clickhouse.tech/master/aarch64/clickhouse'
chmod a+x ./clickhouse
sudo ./clickhouse install

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/freebsd/clickhouse'
chmod a+x ./clickhouse
sudo ./clickhouse install

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse'
chmod a+x ./clickhouse
./clickhouse

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/macos/clickhouse'
chmod a+x ./clickhouse
./clickhouse

View File

@ -7,13 +7,13 @@ toc_title: Third-Party Libraries Used
The list of third-party libraries can be obtained by the following query:
```
``` sql
SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en'
```
[Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)
| library_name | license_type | license_path |
| library_name | license_type | license_path |
|:-|:-|:-|
| abseil-cpp | Apache | /contrib/abseil-cpp/LICENSE |
| AMQP-CPP | Apache | /contrib/AMQP-CPP/LICENSE |
@ -89,3 +89,15 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li
| xz | Public Domain | /contrib/xz/COPYING |
| zlib-ng | zLib | /contrib/zlib-ng/LICENSE.md |
| zstd | BSD | /contrib/zstd/LICENSE |
## Guidelines for adding new third-party libraries and maintaining custom changes in them {#adding-third-party-libraries}
1. All external third-party code should reside in dedicated directories under the `contrib` directory of the ClickHouse repo. Prefer Git submodules, when available.
2. Fork/mirror the official repo in [Clickhouse-extras](https://github.com/ClickHouse-Extras). Prefer official GitHub repos, when available.
3. Branch from the branch you want to integrate, e.g., `master` -> `clickhouse/master`, or `release/vX.Y.Z` -> `clickhouse/release/vX.Y.Z`.
4. All forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) can be automatically synchronized with upstreams. `clickhouse/...` branches will remain unaffected, since virtually nobody is going to use that naming pattern in their upstream repos.
5. Add submodules under `contrib` of the ClickHouse repo that refer to the above forks/mirrors. Set the submodules to track the corresponding `clickhouse/...` branches.
6. Every time custom changes have to be made in the library code, a dedicated branch should be created, like `clickhouse/my-fix`. That branch should then be merged into the branch tracked by the submodule, e.g., `clickhouse/master` or `clickhouse/release/vX.Y.Z`.
7. No code should be pushed to any branch of the forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) whose name does not follow the `clickhouse/...` pattern.
8. Always write the custom changes with the official repo in mind. Once the PR is merged from (a feature/fix branch in) your personal fork into the fork in [Clickhouse-extras](https://github.com/ClickHouse-Extras), and the submodule is bumped in the ClickHouse repo, consider opening another PR from (a feature/fix branch in) the fork in [Clickhouse-extras](https://github.com/ClickHouse-Extras) to the official repo of the library. This makes sure that 1) the contribution has more than a single use case and importance, 2) others will also benefit from it, and 3) the change will not remain a maintenance burden solely on ClickHouse developers.
9. When a submodule needs to start using newer code from the original branch (e.g., `master`), and since the custom changes might be merged into the branch it is tracking (e.g., `clickhouse/master`) and so may diverge from its original counterpart (i.e., `master`), a careful merge should be carried out first, i.e., `master` -> `clickhouse/master`, and only then can the submodule be bumped in ClickHouse.

View File

@ -237,6 +237,8 @@ The description of ClickHouse architecture can be found here: https://clickhouse
The Code Style Guide: https://clickhouse.tech/docs/en/development/style/
Adding third-party libraries: https://clickhouse.tech/docs/en/development/contrib/#adding-third-party-libraries
Writing tests: https://clickhouse.tech/docs/en/development/tests/
List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22

View File

@ -628,7 +628,7 @@ If the class is not intended for polymorphic use, you do not need to make functi
**18.** Encodings.
Use UTF-8 everywhere. Use `std::string`and`char *`. Do not use `std::wstring`and`wchar_t`.
Use UTF-8 everywhere. Use `std::string` and `char *`. Do not use `std::wstring` and `wchar_t`.
**19.** Logging.
@ -749,17 +749,9 @@ If your code in the `master` branch is not buildable yet, exclude it from the bu
**1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks.
**2.** If necessary, you can use any well-known libraries available in the OS package.
**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in the form of source code in the `contrib` directory and built with ClickHouse.
If there is a good solution already available, then use it, even if it means you have to install another library.
(But be prepared to remove bad libraries from code.)
**3.** You can install a library that isn't in the packages, if the packages do not have what you need or have an outdated version or the wrong type of compilation.
**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder.
**5.** Preference is always given to libraries that are already in use.
**3.** Preference is always given to libraries that are already in use.
## General Recommendations {#general-recommendations-1}

View File

@ -0,0 +1,71 @@
---
toc_priority: 30
toc_title: MaterializedPostgreSQL
---
# MaterializedPostgreSQL {#materialize-postgresql}
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE test_database
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password');

SELECT * FROM test_database.postgres_table;
```
## Settings {#settings}
1. `materialized_postgresql_max_block_size` - Number of rows collected before flushing data into the table. Default: `65536`.
2. `materialized_postgresql_tables_list` - List of tables for the MaterializedPostgreSQL database engine. Default: `whole database`.
3. `materialized_postgresql_allow_automatic_update` - Allows reloading a table in the background when schema changes are detected. Default: `0` (`false`).
``` sql
CREATE DATABASE test_database
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
SETTINGS materialized_postgresql_max_block_size = 65536,
         materialized_postgresql_tables_list = 'table1,table2,table3';
SELECT * FROM test_database.table1;
```
## Requirements {#requirements}
- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the PostgreSQL config file.
- Each replicated table must have one of the following **replica identity**:
1. **default** (primary key)
2. **index**
``` bash
postgres# CREATE TABLE postgres_table (a Integer NOT NULL, b Integer, c Integer NOT NULL, d Integer, e Integer NOT NULL);
postgres# CREATE unique INDEX postgres_table_index on postgres_table(a, c, e);
postgres# ALTER TABLE postgres_table REPLICA IDENTITY USING INDEX postgres_table_index;
```
The primary key is always checked first. If it is absent, the index defined as the replica identity index is checked.
If an index is used as the replica identity, the table must contain exactly one such index.
You can check what type is used for a specific table with the following command:
``` bash
postgres# SELECT CASE relreplident
WHEN 'd' THEN 'default'
WHEN 'n' THEN 'nothing'
WHEN 'f' THEN 'full'
WHEN 'i' THEN 'index'
END AS replica_identity
FROM pg_class
WHERE oid = 'postgres_table'::regclass;
```
## Warning {#warning}
1. **TOAST** value conversion is not supported. The default value for the data type will be used.

View File

@ -0,0 +1,46 @@
---
toc_priority: 12
toc_title: MaterializedPostgreSQL
---
# MaterializedPostgreSQL {#materialize-postgresql}
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
```
## Requirements {#requirements}
- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the PostgreSQL config file.
- A table with the `MaterializedPostgreSQL` engine must have a primary key that matches the replica identity index (by default, the primary key) of the postgres table (see [details on replica identity index](../../database-engines/materialized-postgresql.md#requirements)).
- Only the `Atomic` database is allowed.
## Virtual columns {#creating-a-table}
- `_version` (`UInt64`)
- `_sign` (`Int8`)
These columns do not need to be added when the table is created. They are always accessible in a `SELECT` query.
The `_version` column equals the `LSN` position in the `WAL`, so it can be used to check how up-to-date the replication is.
``` sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
SELECT key, value, _version FROM test.postgresql_replica;
```
## Warning {#warning}
1. **TOAST** value conversion is not supported. The default value for the data type will be used.

View File

@ -94,11 +94,11 @@ For production environments, it's recommended to use the latest `stable`-versi
To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use official `deb` packages inside.
### Single Binary
### Single Binary {#from-single-binary}
You can install ClickHouse on Linux using single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse].
You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse].
```
``` bash
curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse
sudo ./clickhouse install
```
@ -107,9 +107,10 @@ sudo ./clickhouse install
For non-Linux operating systems and for the AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours' delay).
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
- [MacOS x86_64](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.tech/master/macos-aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD x86_64](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [Linux AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.

View File

@ -498,7 +498,7 @@ Return a message.
<response_content>Say Hi!</response_content>
</handler>
</rule>
<http_handlers>
</http_handlers>
```
``` bash

View File

@ -110,7 +110,7 @@ toc_title: Adopters
| <a href="https://www.semrush.com/" class="favicon">SEMrush</a> | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
| <a href="https://sentry.io/" class="favicon">Sentry</a> | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
@ -154,5 +154,7 @@ toc_title: Adopters
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
| <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

View File

@ -1728,6 +1728,28 @@ Possible values:
Default value: 0.
## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}
Enables or disables an optimization that transforms some functions into reads of subcolumns. This reduces the amount of data to read.
These functions can be transformed:
- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn.
- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn.
Possible values:
- 0 — Optimization disabled.
- 1 — Optimization enabled.
Default value: `0`.
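**Example**

A minimal sketch (the table and column names are hypothetical):

``` sql
CREATE TABLE t_opt (n Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();

SET optimize_functions_to_subcolumns = 1;

-- isNull(n) is now answered from the compact n.null subcolumn
-- instead of reading and processing the whole column.
SELECT isNull(n) FROM t_opt;
```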
## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
- Type: seconds

View File

@ -36,4 +36,4 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
- [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics, calculated periodically in the background.
- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metric_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) <!--hide-->

View File

@ -33,6 +33,6 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metrics) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics) <!--hide-->

View File

@ -68,4 +68,4 @@ estimated_recovery_time: 0
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/clusters) <!--hide-->

View File

@ -69,4 +69,21 @@ is_in_sampling_key: 0
compression_codec:
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns) <!--hide-->
The `system.columns` table contains the following columns (the column type is shown in brackets):
- `database` (String) — Database name.
- `table` (String) — Table name.
- `name` (String) — Column name.
- `type` (String) — Column type.
- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
- `marks_bytes` (UInt64) — The size of marks, in bytes.
- `comment` (String) — Comment on the column, or an empty string if it is not defined.
- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/columns) <!--hide-->

View File

@ -38,4 +38,4 @@ SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova'
│ Olga Khvostikova │
└──────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/contributors) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/contributors) <!--hide-->

View File

@ -8,4 +8,4 @@ Columns:
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/current-roles) <!--hide-->

View File

@ -33,4 +33,4 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String'
- [Syntax](../../sql-reference/syntax.md) — Information about supported syntax.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/data_type_families) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/data_type_families) <!--hide-->

View File

@ -35,4 +35,4 @@ SELECT * FROM system.databases
└────────────────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/databases) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/databases) <!--hide-->

View File

@ -8,4 +8,4 @@ For the description of other columns, see [system.parts](../../operations/system
If the part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/partition.md#alter_drop-detached).
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/detached_parts) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/detached_parts) <!--hide-->

View File

@ -61,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/dictionaries) <!--hide-->

View File

@ -10,9 +10,6 @@ Columns:
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/disks) <!--hide-->
**Example**
```sql
@ -27,5 +24,4 @@ Columns:
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) <!--hide-->

View File

@ -9,4 +9,4 @@ Columns:
- `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/enabled-roles) <!--hide-->

View File

@ -31,4 +31,4 @@ SELECT * FROM system.events LIMIT 5
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/events) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/events) <!--hide-->

View File

@ -7,8 +7,6 @@ Columns:
- `name` (`String`) — The name of the function.
- `is_aggregate` (`UInt8`) — Whether the function is aggregate.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/functions) <!--hide-->
**Example**
```sql
@ -30,4 +28,6 @@ Columns:
└──────────────────────────┴──────────────┴──────────────────┴──────────┘
10 rows in set. Elapsed: 0.002 sec.
```
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) <!--hide-->

View File

@ -21,4 +21,4 @@ Columns:
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax).
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/grants) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/grants) <!--hide-->

View File

@ -14,4 +14,4 @@ Columns:
- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/graphite_retentions) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/graphite_retentions) <!--hide-->

View File

@ -36,4 +36,4 @@ SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/licenses) <!--hide-->

View File

@ -51,4 +51,4 @@ type: SettingUInt64
4 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merge_tree_settings) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) <!--hide-->

View File

@ -22,4 +22,4 @@ Columns:
- `merge_type` — The type of current merge. Empty if it's a mutation.
- `merge_algorithm` — The algorithm used in current merge. Empty if it's a mutation.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merges) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merges) <!--hide-->

View File

@ -48,4 +48,4 @@ CurrentMetric_DistributedFilesToInsert: 0
- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metric_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metric_log) <!--hide-->

View File

@ -38,4 +38,4 @@ SELECT * FROM system.metrics LIMIT 10
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metrics) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metrics) <!--hide-->

View File

@ -45,4 +45,4 @@ If there were problems with mutating some data parts, the following columns cont
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/mutations) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/mutations) <!--hide-->

View File

@ -29,4 +29,4 @@ Reads from this table are not parallelized.
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) <!--hide-->

View File

@ -27,4 +27,4 @@ Used for tests.
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers_mt) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) <!--hide-->

View File

@ -20,4 +20,4 @@ This is similar to the `DUAL` table found in other DBMSs.
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/one) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) <!--hide-->

View File

@ -66,4 +66,4 @@ error: 0
exception:
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/part_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) <!--hide-->

View File

@ -155,4 +155,4 @@ move_ttl_info.max: []
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/parts) <!--hide-->

View File

@ -14,7 +14,6 @@ Columns:
- `query` (String) — The query text. For `INSERT`, it does not include the data to insert.
- `query_id` (String) — Query ID, if defined.
```sql
:) SELECT * FROM system.processes LIMIT 10 FORMAT Vertical;
```
@ -59,4 +58,4 @@ Settings: {'background_pool_size':'32','load_balancing':'random','al
1 rows in set. Elapsed: 0.002 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/processes) <!--hide-->

View File

@ -156,4 +156,4 @@ Settings: {'background_pool_size':'32','load_balancing':'random','al
- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_log) <!--hide-->

View File

@ -113,4 +113,4 @@ ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'Compr
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_thread_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_thread_log) <!--hide-->

View File

@ -17,3 +17,5 @@ Columns:
- `max_read_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of rows read from all tables and table functions that participated in queries.
- `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of bytes read from all tables and table functions that participated in queries.
- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of the query execution time, in seconds.
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_limits) <!--hide-->

View File

@ -28,3 +28,5 @@ Columns:
## See Also {#see-also}
- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_usage) <!--hide-->

View File

@ -24,5 +24,5 @@ Columns:
- [SHOW QUOTAS](../../sql-reference/statements/show.md#show-quotas-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas) <!--hide-->

View File

@ -30,4 +30,6 @@ Columns:
## See Also {#see-also}
- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas_usage) <!--hide-->

View File

@ -120,5 +120,5 @@ WHERE
If this query does not return anything, it means that everything is fine.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/replicas) <!--hide-->

View File

@ -18,4 +18,4 @@ Columns:
- 1 — The role has `ADMIN OPTION` privilege.
- 0 — The role does not have the `ADMIN OPTION` privilege.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) <!--hide-->

View File

@ -12,4 +12,4 @@ Columns:
- [SHOW ROLES](../../sql-reference/statements/show.md#show-roles-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/roles) <!--hide-->

View File

@ -31,4 +31,4 @@ Columns:
- [SHOW POLICIES](../../sql-reference/statements/show.md#show-policies-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/row_policies) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/row_policies) <!--hide-->

View File

@ -50,4 +50,4 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing'
- [Constraints on Settings](../../operations/settings/constraints-on-settings.md)
- [SHOW SETTINGS](../../sql-reference/statements/show.md#show-settings) statement
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings) <!--hide-->

View File

@ -27,4 +27,4 @@ Columns:
- `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — A parent profile for this setting profile. `NULL` if not set. The setting profile will inherit all the settings' values and constraints (`min`, `max`, `readonly`) from its parent profiles.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profile_elements) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profile_elements) <!--hide-->

View File

@ -21,4 +21,4 @@ Columns:
- [SHOW PROFILES](../../sql-reference/statements/show.md#show-profiles-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profiles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profiles) <!--hide-->

View File

@ -14,4 +14,4 @@ Columns:
If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/storage_policies) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/storage_policies) <!--hide-->

View File

@ -35,4 +35,4 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
- Kafka [settings](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
- Join [settings](../../engines/table-engines/special/join.md#join-limitations-and-settings)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/table_engines) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/table_engines) <!--hide-->

View File

@ -117,4 +117,4 @@ lifetime_bytes: ᴺᵁᴸᴸ
comment:
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/tables) <!--hide-->

View File

@ -50,4 +50,4 @@ source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void
source_line: 45
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/text_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/text_log) <!--hide-->

View File

@ -27,4 +27,4 @@ SELECT * FROM system.time_zones LIMIT 10
└────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/time_zones) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/time_zones) <!--hide-->

View File

@ -55,4 +55,3 @@ size: 5244400
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/trace_log) <!--hide-->

View File

@ -31,4 +31,4 @@ Columns:
- [SHOW USERS](../../sql-reference/statements/show.md#show-users-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/users) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/users) <!--hide-->

View File

@ -72,4 +72,4 @@ numChildren: 7
pzxid: 987021252247
path: /clickhouse/tables/01-08/visits/replicas
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/zookeeper) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/zookeeper) <!--hide-->

View File

@ -31,6 +31,8 @@ ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this const
The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it.
However, the `SELECT count(nullable_column) FROM table` query can be optimized by enabling the [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [null](../../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column data. The query `SELECT count(n) FROM table` transforms to `SELECT sum(NOT n.null) FROM table`.
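As a quick illustration (the table `t_count` and its column `n` are hypothetical), the rewrite should be visible with `EXPLAIN SYNTAX`:

``` sql
CREATE TABLE t_count (n Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();

SET optimize_functions_to_subcolumns = 1;

-- Expected to print a query using sum(NOT n.null) instead of count(n).
EXPLAIN SYNTAX SELECT count(n) FROM t_count;
```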
**Examples**
Example 1:

View File

@ -1,17 +1,22 @@
---
toc_priority: 212
---
# median {#median}
The `median*` functions are aliases for the corresponding `quantile*` functions. They calculate the median of a numeric data sample.
Functions:
- `median` — Alias for [quantile](#quantile).
- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
- `medianExact` — Alias for [quantileExact](#quantileexact).
- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).
- `median` — Alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).
- `medianDeterministic` — Alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md#quantiledeterministic).
- `medianExact` — Alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact).
- `medianExactWeighted` — Alias for [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md#quantileexactweighted).
- `medianTiming` — Alias for [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming).
- `medianTimingWeighted` — Alias for [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md#quantiletimingweighted).
- `medianTDigest` — Alias for [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md#quantiletdigest).
- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md#quantiletdigestweighted).
- `medianBFloat16` — Alias for [quantileBFloat16](../../../sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16).
**Example**
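A minimal sketch showing the alias relationship:

``` sql
-- median is an alias for quantile, so the two results are equal (returns 1).
SELECT median(number) = quantile(0.5)(number) FROM numbers(11);
```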

View File

@ -0,0 +1,64 @@
---
toc_priority: 209
---
# quantileBFloat16 {#quantilebfloat16}
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error of no more than 0.390625%.
**Syntax**
``` sql
quantileBFloat16[(level)](expr)
```
Alias: `medianBFloat16`.
**Arguments**
- `expr` — Column with numeric data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md).
**Parameters**
- `level` — Level of quantile. Optional. Possible values are in the range from 0 to 1. Default value: 0.5. [Float](../../../sql-reference/data-types/float.md).
**Returned value**
- Approximate quantile of the specified level.
Type: [Float64](../../../sql-reference/data-types/float.md#float32-float64).
**Example**
The input table has an integer and a float column:
``` text
┌─a─┬─────b─┐
│ 1 │ 1.001 │
│ 2 │ 1.002 │
│ 3 │ 1.003 │
│ 4 │ 1.004 │
└───┴───────┘
```
Query to calculate 0.75-quantile (third quartile):
``` sql
SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table;
```
Result:
``` text
┌─quantileBFloat16(0.75)(a)─┬─quantileBFloat16(0.75)(b)─┐
│                         3 │                         1 │
└───────────────────────────┴───────────────────────────┘
```
Note that all floating point values in the example are truncated to 1.0 when converting to `bfloat16`.
**See Also**
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

View File

@ -2,7 +2,9 @@
toc_priority: 202
---
# quantileExact {#quantileexact}
# quantileExact Functions {#quantileexact-functions}
## quantileExact {#quantileexact}
Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
@ -49,7 +51,7 @@ Result:
└───────────────────────┘
```
# quantileExactLow {#quantileexactlow}
## quantileExactLow {#quantileexactlow}
Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
@ -66,13 +68,13 @@ SELECT quantileExactLow(0.1)(number) FROM numbers(10)
│ 1 │
└───────────────────────────────┘
```
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
**Syntax**
``` sql
quantileExact(level)(expr)
quantileExactLow(level)(expr)
```
Alias: `medianExactLow`.
@ -107,12 +109,11 @@ Result:
│ 4 │
└──────────────────────────┘
```
# quantileExactHigh {#quantileexacthigh}
## quantileExactHigh {#quantileexacthigh}
Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
All the passed values are combined into an array, which is then fully sorted,
to get the exact value. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.
All the passed values are combined into an array, which is then fully sorted to get the exact value. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))` comparisons, where `N = std::distance(first, last)`.
The return value depends on the quantile level and the number of elements in the selection, i.e. if the level is 0.5, then the function returns the higher median value for an even number of elements and the middle median value for an odd number of elements. The median is calculated similarly to the [median_high](https://docs.python.org/3/library/statistics.html#statistics.median_high) implementation used in Python. For all other levels, the element at the index corresponding to the value of `level * size_of_array` is returned.
@ -158,6 +159,111 @@ Result:
│ 5 │
└───────────────────────────┘
```
## quantileExactExclusive {#quantileexactexclusive}
Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very efficient.
This function is equivalent to the [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba) Excel function ([type R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
When using multiple `quantileExactExclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function.
**Syntax**
``` sql
quantileExactExclusive(level)(expr)
```
**Arguments**
- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
**Parameters**
- `level` — Level of quantile. Optional. Possible values: (0, 1) — bounds not included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
**Returned value**
- Quantile of the specified level.
Type:
- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
**Example**
Query:
``` sql
CREATE TABLE num AS numbers(1000);
SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num);
```
Result:
``` text
┌─quantileExactExclusive(0.6)(x)─┐
│                          599.6 │
└────────────────────────────────┘
```
## quantileExactInclusive {#quantileexactinclusive}
Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very efficient.
This function is equivalent to the [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed) Excel function ([type R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
When using multiple `quantileExactInclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive) function.
**Syntax**
``` sql
quantileExactInclusive(level)(expr)
```
**Arguments**
- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
**Parameters**
- `level` — Level of quantile. Optional. Possible values: [0, 1] — bounds included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
**Returned value**
- Quantile of the specified level.
Type:
- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
**Example**
Query:
``` sql
CREATE TABLE num AS numbers(1000);
SELECT quantileExactInclusive(0.6)(x) FROM (SELECT number AS x FROM num);
```
Result:
``` text
┌─quantileExactInclusive(0.6)(x)─┐
│                          599.4 │
└────────────────────────────────┘
```
**See Also**
- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)

View File

@ -2,8 +2,114 @@
toc_priority: 201
---
# quantiles {#quantiles}
# quantiles Functions {#quantiles-functions}
## quantiles {#quantiles}
Syntax: `quantiles(level1, level2, …)(x)`
All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`, `quantilesBFloat16`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
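**Example**

A minimal sketch computing several levels in one pass:

``` sql
-- Returns an array with the three quartiles of the numbers 0..99.
SELECT quantiles(0.25, 0.5, 0.75)(number) FROM numbers(100);
```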
## quantilesExactExclusive {#quantilesexactexclusive}
Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very efficient.
This function is equivalent to the [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba) Excel function ([type R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
Works more efficiently with sets of levels than [quantileExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive).
**Syntax**
``` sql
quantilesExactExclusive(level1, level2, ...)(expr)
```
**Arguments**
- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
**Parameters**
- `level` — Levels of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md).
**Returned value**
- [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels.
Type of array values:
- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
**Example**
Query:
``` sql
CREATE TABLE num AS numbers(1000);
SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num);
```
Result:
``` text
┌─quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐
│ [249.25,499.5,749.75,899.9,949.9499999999999,989.99,998.999] │
└─────────────────────────────────────────────────────────────────────┘
```
## quantilesExactInclusive {#quantilesexactinclusive}
Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very efficient.
This function is equivalent to the [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed) Excel function ([type R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
Works more efficiently with sets of levels than [quantileExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactinclusive).
**Syntax**
``` sql
quantilesExactInclusive(level1, level2, ...)(expr)
```
**Arguments**
- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
**Parameters**
- `level` — Levels of quantiles. Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md).
**Returned value**
- [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels.
Type of array values:
- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
**Example**
Query:
``` sql
CREATE TABLE num AS numbers(1000);
SELECT quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num);
```
Result:
``` text
┌─quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐
│ [249.75,499.5,749.25,899.1,949.05,989.01,998.001] │
└─────────────────────────────────────────────────────────────────────┘
```

View File

@ -8,6 +8,7 @@ toc_title: Map(key, value)
`Map(key, value)` data type stores `key:value` pairs.
**Parameters**
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).
@ -75,6 +76,36 @@ SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map
└───────────────────────────────┘
```
## Map.keys and Map.values Subcolumns {#map-subcolumns}
To optimize `Map` column processing, in some cases you can use the `keys` and `values` subcolumns instead of reading the whole column.
**Example**
Query:
``` sql
CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory;
INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3));
SELECT a.keys FROM t_map;
SELECT a.values FROM t_map;
```
Result:
``` text
┌─a.keys─────────────────┐
│ ['key1','key2','key3'] │
└────────────────────────┘
┌─a.values─┐
│ [1,2,3] │
└──────────┘
```
**See Also**
- [map()](../../sql-reference/functions/tuple-map-functions.md#function-map) function

View File

@ -57,7 +57,7 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con
When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`.
- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`).
- Dictionaries from other sources are updated every time by default.
For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
@ -88,13 +88,13 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronous and asynchronous updates are supported.
It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after previous update. If `update_field` is specified in as part of dictionary source configuration value of previous update time in seconds will be added to data request. Depends of source type Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC different logic will be applied to `update_field` before request data from external source.
It is also possible for `Flat`, `Hashed`, and `ComplexKeyHashed` dictionaries to request only data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, the value of the previous update time in seconds is added to the data request. Depending on the source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, or ODBC), different logic is applied to `update_field` before requesting data from an external source.
- If source is HTTP then `update_field` will be added as query parameter with last update time as parameter value.
- If source is Executable then `update_field` will be added as executable script argument with last update time as argument value.
- If source is ClickHouse, MySQL, PostgreSQL, ODBC there will be additional part of WHERE, where `update_field` is compared as greater or equal with last update time.
- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value.
- If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value.
- If the source is ClickHouse, MySQL, PostgreSQL, or ODBC, an additional `WHERE` condition is added that compares `update_field` as greater than or equal to the last update time.
If `update_field` option is set. Additional option `update_lag` can be set. Value of `update_lag` option is subtracted from previous update time before request updated data.
If the `update_field` option is set, the additional option `update_lag` can be set. The value of `update_lag` is subtracted from the previous update time before requesting updated data.
Example of settings:
@ -116,4 +116,4 @@ or
...
SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15))
...
```
```

View File

@ -159,7 +159,7 @@ Configuration fields:
| Tag | Description | Required |
|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
| `name` | Column name. | Yes |
| `type` | ClickHouse data type: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md).<br/>ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.<br/>[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes |
| `type` | ClickHouse data type: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md), [Array](../../../sql-reference/data-types/array.md).<br/>ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.<br/>[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes |
| `null_value` | Default value for a non-existing element.<br/>In the example, it is an empty string. [NULL](../../syntax.md#null-literal) value can be used only for the `Nullable` types (see the previous line with types description). | Yes |
| `expression` | [Expression](../../../sql-reference/syntax.md#syntax-expressions) that ClickHouse executes on the value.<br/>The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.<br/><br/>Default value: no expression. | No |
| <a name="hierarchical-dict-attr"></a> `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md).<br/><br/>Default value: `false`. | No |
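In the DDL form of dictionary definitions, the same fields map onto the column clause of `CREATE DICTIONARY`; a sketch (all names are hypothetical, and `DEFAULT` plays the role of `null_value`):

``` sql
CREATE DICTIONARY hypothetical_dict
(
    id UInt64,
    title String DEFAULT ''  -- attribute name, type, and default (null_value analog)
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' DB 'default' TABLE 'src'))
LAYOUT(FLAT())
LIFETIME(300);
```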

View File

@ -11,18 +11,24 @@ Returns 1 for an empty array, or 0 for a non-empty array.
The result type is UInt8.
The function also works for strings.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT empty(arr) FROM table` transforms to `SELECT arr.size0 = 0 FROM table`.
## notEmpty {#function-notempty}
Returns 0 for an empty array, or 1 for a non-empty array.
The result type is UInt8.
The function also works for strings.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT notEmpty(arr) FROM table` transforms to `SELECT arr.size0 != 0 FROM table`.
## length {#array_functions-length}
Returns the number of items in the array.
The result type is UInt64.
The function also works for strings.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT length(arr) FROM table` transforms to `SELECT arr.size0 FROM table`.
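A minimal sketch covering the three functions above (the table name is hypothetical):

``` sql
CREATE TABLE t_arr (arr Array(UInt32)) ENGINE = MergeTree ORDER BY tuple();

SET optimize_functions_to_subcolumns = 1;

-- Each call below is rewritten to use only the arr.size0 subcolumn.
SELECT empty(arr), notEmpty(arr), length(arr) FROM t_arr;
```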
## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64}
## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64}

View File

@ -80,6 +80,7 @@ SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
toInt32(time_samoa) AS int32samoa
FORMAT Vertical;
```
Result:
```text
@ -1014,7 +1015,7 @@ Result:
## dateName {#dataname}
Returns part of date with specified date part.
Returns the specified part of a date.
**Syntax**
@ -1024,13 +1025,13 @@ dateName(date_part, date)
**Arguments**
- `date_part` - Date part. Possible values .
- `date` — Date [Date](../../sql-reference/data-types/date.md) or DateTime [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md).
- `date_part` — Date part. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
- `date` — Date. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Returned value**
- Specified date part of date.
- The specified part of the date.
Type: [String](../../sql-reference/data-types/string.md#string).
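**Example**

A minimal sketch:

``` sql
-- Returns the String '2021'.
SELECT dateName('year', toDate('2021-07-07')) AS year_name;
```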

View File

@ -224,7 +224,7 @@ Accepts an integer. Returns an array of UInt64 numbers containing the list of po
## bitPositionsToArray(num) {#bitpositionstoarraynum}
Accepts an integer, argument will be converted to unsigned integer type. Returns an array of UInt64 numbers containing the list of positions of bits that equals 1. Numbers in the array are in ascending order.
Accepts an integer and converts it to an unsigned integer. Returns an array of `UInt64` numbers containing the list of positions of bits of `arg` that equal `1`, in ascending order.
**Syntax**
@ -234,11 +234,13 @@ bitPositionsToArray(arg)
**Arguments**
- `arg` — Integer value.Types: [Int/UInt](../../sql-reference/data-types/int-uint.md)
- `arg` — Integer value. [Int/UInt](../../sql-reference/data-types/int-uint.md).
**Returned value**
An array of UInt64 numbers containing the list of positions of bits that equals 1. Numbers in the array are in ascending order.
- An array containing a list of positions of bits that equal `1`, in ascending order.
Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
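A minimal sketch:

``` sql
-- 1 has only bit 0 set, so the result is [0].
SELECT bitPositionsToArray(toInt8(1)) AS res;
```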

View File

@ -70,23 +70,23 @@ Result:
Collect all the keys and sum corresponding values.
**Syntax**
**Syntax**
``` sql
mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...])
mapAdd(arg1, arg2 [, ...])
```
**Arguments**
**Arguments**
Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for the each key. All key arrays should have same type, and all value arrays should contain items which are promote to the one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains the values for each key. All key arrays should have the same type, and all value arrays should contain items which can be promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as the type for the result array.
**Returned value**
- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
- Depending on the arguments, returns one [map](../../sql-reference/data-types/map.md) or [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains the values.
**Example**
Query:
Query with a tuple map:
``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
@ -100,6 +100,11 @@ Result:
└───────────────┴────────────────────────────────────┘
```
Query with `Map` type:
``` sql
```
## mapSubtract {#function-mapsubtract}
Collect all the keys and subtract corresponding values.
@ -220,6 +225,8 @@ Result:
Returns all keys from the `map` parameter.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn instead of reading and processing the whole column data. The query `SELECT mapKeys(m) FROM table` transforms to `SELECT m.keys FROM table`.
**Syntax**
```sql
@ -261,6 +268,8 @@ Result:
Returns all values from the `map` parameter.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn instead of reading and processing the whole column. The query `SELECT mapValues(m) FROM table` is transformed into `SELECT m.values FROM table`.
**Syntax**
```sql

@ -283,6 +283,8 @@ ClickHouse supports the `IS NULL` and `IS NOT NULL` operators.
- `0` otherwise.
- For other values, the `IS NULL` operator always returns `0`.
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column. The query `SELECT n IS NULL FROM table` is transformed into `SELECT n.null FROM table`.
<!-- -->
``` sql
@ -313,3 +315,5 @@ SELECT * FROM t_null WHERE y IS NOT NULL
│ 2 │ 3 │
└───┴───┘
```
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column. The query `SELECT n IS NOT NULL FROM table` is transformed into `SELECT NOT n.null FROM table`.
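For example, with the `t_null` table from the queries above:

``` sql
SET optimize_functions_to_subcolumns = 1;
SELECT y IS NOT NULL FROM t_null;  -- executed as SELECT NOT y.null FROM t_null
```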

@ -8,7 +8,7 @@ toc_title: INDEX
The following operations are available:
- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Adds index description to tables metadata.
- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` - Adds an index description to the table's metadata.
- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table's metadata and deletes the index files from disk.
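For example, a minimal sketch with hypothetical database, table, column, and index names:

``` sql
ALTER TABLE db.table_name ADD INDEX idx_val my_column TYPE minmax GRANULARITY 4 FIRST;
ALTER TABLE db.table_name DROP INDEX idx_val;
```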

@ -19,6 +19,8 @@ The following operations with [partitions](../../../engines/table-engines/merget
- [UNFREEZE PARTITION](#alter_unfreeze-partition) — Removes a backup of a partition.
- [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server.
- [MOVE PARTITION\|PART](#alter_move-partition) — Moves a partition or data part to another disk or volume.
- [UPDATE IN PARTITION](#update-in-partition) — Updates data inside the partition according to a condition.
- [DELETE IN PARTITION](#delete-in-partition) — Deletes data inside the partition according to a condition.
<!-- -->
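For instance, a sketch with a hypothetical table `mt` partitioned by a column `p`:

``` sql
ALTER TABLE mt UPDATE x = 1 IN PARTITION 2 WHERE p = 2;
ALTER TABLE mt DELETE IN PARTITION 2 WHERE p = 2;
```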

@ -0,0 +1,48 @@
---
toc_priority: 55
toc_title: s3Cluster
---
# s3Cluster Table Function {#s3Cluster-table-function}
Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On a worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.
**Syntax**
``` sql
s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure)
```
**Arguments**
- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `source` — URL to a file or a set of files. Supports the following wildcards in read-only mode: `*`, `?`, `{'abc','def'}` and `{N..M}`, where `N` and `M` are numbers and `abc` and `def` are strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
**Returned value**
A table with the specified structure for reading or writing data in the specified file.
**Examples**
Select the data from all files in the cluster `cluster_simple`:
``` sql
SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon);
```
Count the total number of rows in all files in the cluster `cluster_simple`:
``` sql
SELECT count(*) FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))');
```
!!! warning "Warning"
    If your list of files contains number ranges with leading zeros, use the brace construction for each digit separately, or use `?`.
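For example, a sketch for hypothetical zero-padded files `file_001.csv` through `file_100.csv`:

``` sql
-- `?` matches exactly one character, so `file_???.csv` covers the zero-padded names;
-- `file_{0..1}{0..9}{0..9}.csv` (one brace range per digit) is the other option.
SELECT count(*)
FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/file_???.csv', 'CSV', 'name String, value UInt32');
```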
**See Also**
- [S3 engine](../../engines/table-engines/integrations/s3.md)
- [s3 table function](../../sql-reference/table-functions/s3.md)

Some files were not shown because too many files have changed in this diff.