Merge branch 'master' into fix_no_column_materialized_mysql

Alexander Tokmakov 2021-07-05 22:07:56 +03:00
commit 0f377fc4d8
843 changed files with 20239 additions and 39821 deletions

.gitmodules

@ -168,9 +168,6 @@
[submodule "contrib/fmtlib"]
path = contrib/fmtlib
url = https://github.com/fmtlib/fmt.git
[submodule "contrib/antlr4-runtime"]
path = contrib/antlr4-runtime
url = https://github.com/ClickHouse-Extras/antlr4-runtime.git
[submodule "contrib/sentry-native"]
path = contrib/sentry-native
url = https://github.com/ClickHouse-Extras/sentry-native.git


@ -184,10 +184,27 @@ endif ()
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
if (NOT OBJCOPY_PATH AND OS_DARWIN)
find_program (BREW_PATH NAMES "brew")
if (BREW_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
if (LLVM_PREFIX)
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
if (NOT OBJCOPY_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
if (BINUTILS_PREFIX)
find_program (OBJCOPY_PATH NAMES "objcopy" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
endif ()
endif ()
endif ()
if (OBJCOPY_PATH)
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
else ()
message(FATAL_ERROR "Cannot find objcopy.")
message (FATAL_ERROR "Cannot find objcopy.")
endif ()
if (OS_DARWIN)


@ -8,7 +8,7 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-qfort0u8-TWqK4wIP0YSdoDE0btKa1w) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.


@ -17,7 +17,7 @@ class DateLUT : private boost::noncopyable
{
public:
/// Return singleton DateLUTImpl instance for the default time zone.
static ALWAYS_INLINE const DateLUTImpl & instance()
static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071
{
const auto & date_lut = getInstance();
return *date_lut.default_impl.load(std::memory_order_acquire);
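A brief usage sketch of the singleton accessor above (the // -V1071 marker suppresses a PVS-Studio diagnostic about the return value being ignored). The include path and call site below are illustrative assumptions of this example, not part of the diff:

#include <common/DateLUT.h>
#include <ctime>
#include <iostream>

int main()
{
    /// Process-wide LUT for the default time zone; cheap to call repeatedly.
    const DateLUTImpl & lut = DateLUT::instance();
    std::cout << "current hour: " << lut.toHour(std::time(nullptr)) << '\n';
}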


@ -119,11 +119,16 @@ private:
}
public:
/// We use Int64 instead of time_t because time_t is mapped to different types (long or long long)
/// on Linux and Darwin (on both of them, long and long long are 64 bit and behave identically,
/// but they are different types in C++ and this affects function overload resolution).
using Time = Int64;
/// The order of fields matters for alignment and sizeof.
struct Values
{
/// time_t at beginning of the day.
Int64 date;
/// Time at beginning of the day.
Time date;
/// Properties of the day.
UInt16 year;
@ -182,20 +187,20 @@ private:
LUTIndex years_months_lut[DATE_LUT_YEARS * 12];
/// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time.
time_t offset_at_start_of_epoch;
Time offset_at_start_of_epoch;
/// UTC offset at the beginning of the first supported year.
time_t offset_at_start_of_lut;
Time offset_at_start_of_lut;
bool offset_is_whole_number_of_hours_during_epoch;
/// Time zone name.
std::string time_zone;
inline LUTIndex findIndex(time_t t) const
inline LUTIndex findIndex(Time t) const
{
/// First guess.
Int64 guess = (t / 86400) + daynum_offset_epoch;
Time guess = (t / 86400) + daynum_offset_epoch;
/// For negative time_t the integer division was rounded up, so the guess is offset by one.
/// For negative Time the integer division was rounded up, so the guess is offset by one.
if (unlikely(t < 0))
--guess;
@ -227,7 +232,7 @@ private:
return LUTIndex{static_cast<UInt32>(d + daynum_offset_epoch) & date_lut_mask};
}
inline LUTIndex toLUTIndex(time_t t) const
inline LUTIndex toLUTIndex(Time t) const
{
return findIndex(t);
}
@ -280,7 +285,7 @@ public:
/// Round down to start of monday.
template <typename DateOrTime>
inline time_t toFirstDayOfWeek(DateOrTime v) const
inline Time toFirstDayOfWeek(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
return lut[i - (lut[i].day_of_week - 1)].date;
@ -295,7 +300,7 @@ public:
/// Round down to start of month.
template <typename DateOrTime>
inline time_t toFirstDayOfMonth(DateOrTime v) const
inline Time toFirstDayOfMonth(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
return lut[i - (lut[i].day_of_month - 1)].date;
@ -332,13 +337,13 @@ public:
}
template <typename DateOrTime>
inline time_t toFirstDayOfQuarter(DateOrTime v) const
inline Time toFirstDayOfQuarter(DateOrTime v) const
{
return toDate(toFirstDayOfQuarterIndex(v));
}
/// Round down to start of year.
inline time_t toFirstDayOfYear(time_t t) const
inline Time toFirstDayOfYear(Time t) const
{
return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date;
}
@ -355,14 +360,14 @@ public:
return toDayNum(toFirstDayNumOfYearIndex(v));
}
inline time_t toFirstDayOfNextMonth(time_t t) const
inline Time toFirstDayOfNextMonth(Time t) const
{
LUTIndex index = findIndex(t);
index += 32 - lut[index].day_of_month;
return lut[index - (lut[index].day_of_month - 1)].date;
}
inline time_t toFirstDayOfPrevMonth(time_t t) const
inline Time toFirstDayOfPrevMonth(Time t) const
{
LUTIndex index = findIndex(t);
index -= lut[index].day_of_month;
@ -389,16 +394,16 @@ public:
/** Round to start of day, then shift for specified amount of days.
*/
inline time_t toDateAndShift(time_t t, Int32 days) const
inline Time toDateAndShift(Time t, Int32 days) const
{
return lut[findIndex(t) + days].date;
}
inline time_t toTime(time_t t) const
inline Time toTime(Time t) const
{
const LUTIndex index = findIndex(t);
time_t res = t - lut[index].date;
Time res = t - lut[index].date;
if (res >= lut[index].time_at_offset_change())
res += lut[index].amount_of_offset_change();
@ -406,11 +411,11 @@ public:
return res - offset_at_start_of_epoch; /// Starting at 1970-01-01 00:00:00 local time.
}
inline unsigned toHour(time_t t) const
inline unsigned toHour(Time t) const
{
const LUTIndex index = findIndex(t);
time_t time = t - lut[index].date;
Time time = t - lut[index].date;
if (time >= lut[index].time_at_offset_change())
time += lut[index].amount_of_offset_change();
@ -426,7 +431,7 @@ public:
* then subtract the former from the latter to get the offset result.
* The boundaries when meets DST(daylight saving time) change should be handled very carefully.
*/
inline time_t timezoneOffset(time_t t) const
inline Time timezoneOffset(Time t) const
{
const LUTIndex index = findIndex(t);
@ -434,7 +439,7 @@ public:
/// Because the "amount_of_offset_change" in LUT entry only exists in the change day, it's costly to scan it from the very begin.
/// but we can figure out all the accumulated offsets from 1970-01-01 to that day just by get the whole difference between lut[].date,
/// and then, we can directly subtract multiple 86400s to get the real DST offsets for the leap seconds is not considered now.
time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400;
Time res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400;
/// As far as is known, the maximal DST offset cannot be more than 2 hours, so after the modulo operation the remainder
/// sits between [-offset --> 0 --> offset], which corresponds to moving the clock forward or backward respectively.
@ -448,7 +453,7 @@ public:
}
inline unsigned toSecond(time_t t) const
inline unsigned toSecond(Time t) const
{
auto res = t % 60;
if (likely(res >= 0))
@ -456,7 +461,7 @@ public:
return res + 60;
}
inline unsigned toMinute(time_t t) const
inline unsigned toMinute(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return (t / 60) % 60;
@ -474,27 +479,27 @@ public:
}
/// NOTE: Assuming timezone offset is a multiple of 15 minutes.
inline time_t toStartOfMinute(time_t t) const { return roundDown(t, 60); }
inline time_t toStartOfFiveMinute(time_t t) const { return roundDown(t, 300); }
inline time_t toStartOfFifteenMinutes(time_t t) const { return roundDown(t, 900); }
inline Time toStartOfMinute(Time t) const { return roundDown(t, 60); }
inline Time toStartOfFiveMinute(Time t) const { return roundDown(t, 300); }
inline Time toStartOfFifteenMinutes(Time t) const { return roundDown(t, 900); }
inline time_t toStartOfTenMinutes(time_t t) const
inline Time toStartOfTenMinutes(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 600 * 600;
/// More complex logic is for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate.
Int64 date = find(t).date;
Time date = find(t).date;
return date + (t - date) / 600 * 600;
}
/// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception.
inline time_t toStartOfHour(time_t t) const
inline Time toStartOfHour(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600 * 3600;
Int64 date = find(t).date;
Time date = find(t).date;
return date + (t - date) / 3600 * 3600;
}
@ -506,11 +511,11 @@ public:
* because the same calendar day starts/ends at different timestamps in different time zones)
*/
inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; }
inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; }
inline Time fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; }
inline Time fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; }
template <typename DateOrTime>
inline time_t toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; }
inline Time toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; }
template <typename DateOrTime>
inline unsigned toMonth(DateOrTime v) const { return lut[toLUTIndex(v)].month; }
@ -578,7 +583,7 @@ public:
return toDayNum(toFirstDayNumOfISOYearIndex(v));
}
inline time_t toFirstDayOfISOYear(time_t t) const
inline Time toFirstDayOfISOYear(Time t) const
{
return lut[toFirstDayNumOfISOYearIndex(t)].date;
}
@ -773,7 +778,7 @@ public:
}
/// We count all hour-length intervals, unrelated to offset changes.
inline time_t toRelativeHourNum(time_t t) const
inline Time toRelativeHourNum(Time t) const
{
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600;
@ -784,18 +789,18 @@ public:
}
template <typename DateOrTime>
inline time_t toRelativeHourNum(DateOrTime v) const
inline Time toRelativeHourNum(DateOrTime v) const
{
return toRelativeHourNum(lut[toLUTIndex(v)].date);
}
inline time_t toRelativeMinuteNum(time_t t) const
inline Time toRelativeMinuteNum(Time t) const
{
return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60);
}
template <typename DateOrTime>
inline time_t toRelativeMinuteNum(DateOrTime v) const
inline Time toRelativeMinuteNum(DateOrTime v) const
{
return toRelativeMinuteNum(lut[toLUTIndex(v)].date);
}
@ -842,14 +847,14 @@ public:
return ExtendedDayNum(4 + (d - 4) / days * days);
}
inline time_t toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const
inline Time toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const
{
if (days == 1)
return toDate(d);
return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date;
}
inline time_t toStartOfHourInterval(time_t t, UInt64 hours) const
inline Time toStartOfHourInterval(Time t, UInt64 hours) const
{
if (hours == 1)
return toStartOfHour(t);
@ -867,7 +872,7 @@ public:
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
{
/// Align to new hour numbers before rounding.
@ -892,7 +897,7 @@ public:
return values.date + time;
}
inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const
inline Time toStartOfMinuteInterval(Time t, UInt64 minutes) const
{
if (minutes == 1)
return toStartOfMinute(t);
@ -909,7 +914,7 @@ public:
return roundDown(t, seconds);
}
inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const
inline Time toStartOfSecondInterval(Time t, UInt64 seconds) const
{
if (seconds == 1)
return t;
@ -934,14 +939,14 @@ public:
return toDayNum(makeLUTIndex(year, month, day_of_month));
}
inline time_t makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const
inline Time makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const
{
return lut[makeLUTIndex(year, month, day_of_month)].date;
}
/** Does not accept daylight saving time as argument: in case of ambiguity, it chooses the greater timestamp.
*/
inline time_t makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
inline Time makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
{
size_t index = makeLUTIndex(year, month, day_of_month);
UInt32 time_offset = hour * 3600 + minute * 60 + second;
@ -969,7 +974,7 @@ public:
return values.year * 10000 + values.month * 100 + values.day_of_month;
}
inline time_t YYYYMMDDToDate(UInt32 num) const
inline Time YYYYMMDDToDate(UInt32 num) const
{
return makeDate(num / 10000, num / 100 % 100, num % 100);
}
@ -1000,13 +1005,13 @@ public:
TimeComponents time;
};
inline DateComponents toDateComponents(time_t t) const
inline DateComponents toDateComponents(Time t) const
{
const Values & values = getValues(t);
return { values.year, values.month, values.day_of_month };
}
inline DateTimeComponents toDateTimeComponents(time_t t) const
inline DateTimeComponents toDateTimeComponents(Time t) const
{
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
@ -1017,7 +1022,7 @@ public:
res.date.month = values.month;
res.date.day = values.day_of_month;
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1042,7 +1047,7 @@ public:
}
inline UInt64 toNumYYYYMMDDhhmmss(time_t t) const
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
{
DateTimeComponents components = toDateTimeComponents(t);
@ -1055,7 +1060,7 @@ public:
+ UInt64(components.date.year) * 10000000000;
}
inline time_t YYYYMMDDhhmmssToTime(UInt64 num) const
inline Time YYYYMMDDhhmmssToTime(UInt64 num) const
{
return makeDateTime(
num / 10000000000,
@ -1069,12 +1074,12 @@ public:
/// Adding calendar intervals.
/// Implementation specific behaviour when delta is too big.
inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED Time addDays(Time t, Int64 delta) const
{
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1086,7 +1091,7 @@ public:
return lut[new_index].date + time;
}
inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const
{
return addDays(t, delta * 7);
}
@ -1131,14 +1136,14 @@ public:
/// If the resulting month has fewer days than the source month, then saturation can happen.
/// Example: 31 Aug + 1 month = 30 Sep.
inline time_t NO_SANITIZE_UNDEFINED addMonths(time_t t, Int64 delta) const
inline Time NO_SANITIZE_UNDEFINED addMonths(Time t, Int64 delta) const
{
const auto result_day = addMonthsIndex(t, delta);
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1153,7 +1158,7 @@ public:
return toDayNum(addMonthsIndex(d, delta));
}
inline time_t NO_SANITIZE_UNDEFINED addQuarters(time_t t, Int64 delta) const
inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int64 delta) const
{
return addMonths(t, delta * 3);
}
@ -1180,14 +1185,14 @@ public:
}
/// Saturation can occur if 29 Feb is mapped to a non-leap year.
inline time_t addYears(time_t t, Int64 delta) const
inline Time addYears(Time t, Int64 delta) const
{
auto result_day = addYearsIndex(t, delta);
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
time_t time = t - values.date;
Time time = t - values.date;
if (time >= values.time_at_offset_change())
time += values.amount_of_offset_change();
@ -1203,7 +1208,7 @@ public:
}
inline std::string timeToString(time_t t) const
inline std::string timeToString(Time t) const
{
DateTimeComponents components = toDateTimeComponents(t);
@ -1228,7 +1233,7 @@ public:
return s;
}
inline std::string dateToString(time_t t) const
inline std::string dateToString(Time t) const
{
const Values & values = getValues(t);


@ -0,0 +1,41 @@
#include <functional>
/** Adapts a functor to a static method where the functor is passed as context.
* The main use case is converting a lambda into a function that can be passed into JIT code.
*/
template <typename Functor>
class FunctorToStaticMethodAdaptor : public FunctorToStaticMethodAdaptor<decltype(&Functor::operator())>
{
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...) const>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = reinterpret_cast<C*>(ptr);
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...)>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = static_cast<C*>(ptr);
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
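A hedged usage sketch for the new adaptor, assuming the header above is included; all names below are local to the example. A capturing lambda becomes a plain static function plus an opaque context pointer, the calling convention JIT-generated code typically expects:

#include <iostream>

int main()
{
    int captured = 41;
    auto lambda = [&captured](int x) { return captured + x; };

    using Adaptor = FunctorToStaticMethodAdaptor<decltype(lambda)>;

    /// Plain function pointer taking an explicit, untyped context argument.
    auto * fn = &Adaptor::unsafeCall;
    char * context = reinterpret_cast<char *>(&lambda);

    std::cout << fn(context, 1) << '\n'; /// prints 42
}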


@ -1,8 +1,9 @@
#include <common/ReplxxLineReader.h>
#include <common/errnoToString.h>
#include <errno.h>
#include <string.h>
#include <chrono>
#include <cerrno>
#include <cstring>
#include <unistd.h>
#include <functional>
#include <sys/file.h>
@ -24,6 +25,94 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}
/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
/// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com)
std::string replxx_now_ms_str()
{
std::chrono::milliseconds ms(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()));
time_t t = ms.count() / 1000;
tm broken;
if (!localtime_r(&t, &broken))
{
return std::string();
}
static int const BUFF_SIZE(32);
char str[BUFF_SIZE];
strftime(str, BUFF_SIZE, "%Y-%m-%d %H:%M:%S.", &broken);
snprintf(str + sizeof("YYYY-mm-dd HH:MM:SS"), 5, "%03d", static_cast<int>(ms.count() % 1000));
return str;
}
/// Convert from readline to replxx format.
///
/// replxx requires each history line to be prepended with a time line:
///
/// ### YYYY-MM-DD HH:MM:SS.SSS
/// select 1
///
/// Without those service lines it will load all lines from the history file as
/// one history line for suggestion, and if the file contains lots of lines this
/// takes a lot of time (getline() + tons of reallocations).
///
/// NOTE: this code uses std::ifstream/std::ofstream like original replxx code.
void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
{
std::ifstream in(path);
if (!in)
{
rx.print("Cannot open %s reading (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
std::string line;
if (!getline(in, line).good())
{
rx.print("Cannot read from %s (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
/// This is the marker of the date, no need to convert.
static char const REPLXX_TIMESTAMP_PATTERN[] = "### dddd-dd-dd dd:dd:dd.ddd";
if (line.starts_with("### ") && line.size() == strlen(REPLXX_TIMESTAMP_PATTERN))
{
return;
}
std::vector<std::string> lines;
in.seekg(0);
while (getline(in, line).good())
{
lines.push_back(line);
}
in.close();
size_t lines_size = lines.size();
std::sort(lines.begin(), lines.end());
lines.erase(std::unique(lines.begin(), lines.end()), lines.end());
rx.print("The history file (%s) is in old format. %zu lines, %zu unique lines.\n",
path.c_str(), lines_size, lines.size());
std::ofstream out(path);
if (!out)
{
rx.print("Cannot open %s for writing (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
const std::string & timestamp = replxx_now_ms_str();
for (const auto & out_line : lines)
{
out << "### " << timestamp << "\n" << out_line << std::endl;
}
out.close();
}
}
ReplxxLineReader::ReplxxLineReader(
@ -47,6 +136,8 @@ ReplxxLineReader::ReplxxLineReader(
}
else
{
convertHistoryFile(history_file_path, rx);
if (flock(history_file_fd, LOCK_SH))
{
rx.print("Shared lock of history file failed: %s\n", errnoToString(errno).c_str());


@ -1,9 +1,12 @@
# These strings are autochanged from release_lib.sh:
SET(VERSION_REVISION 54452)
# These variables are autochanged by release_lib.sh:
# NOTE: it has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54453)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 7)
SET(VERSION_MINOR 8)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 976ccc2e908ac3bc28f763bfea8134ea0a121b40)
SET(VERSION_DESCRIBE v21.7.1.1-prestable)
SET(VERSION_STRING 21.7.1.1)
SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d)
SET(VERSION_DESCRIBE v21.8.1.1-prestable)
SET(VERSION_STRING 21.8.1.1)
# end of autochange


@ -33,44 +33,25 @@ macro(clickhouse_embed_binaries)
message(FATAL_ERROR "The list of binary resources to embed may not be empty")
endif()
# If cross-compiling, ensure we use the toolchain file and target the
# actual target architecture
if (CMAKE_CROSSCOMPILING)
set(CROSS_COMPILE_FLAGS "--target=${CMAKE_C_COMPILER_TARGET} --gcc-toolchain=${TOOLCHAIN_FILE}")
else()
set(CROSS_COMPILE_FLAGS "")
endif()
add_library("${EMBED_TARGET}" STATIC)
set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in")
set(RESOURCE_OBJS)
foreach(RESOURCE_FILE ${EMBED_RESOURCES})
set(RESOURCE_OBJ "${RESOURCE_FILE}.o")
list(APPEND RESOURCE_OBJS "${RESOURCE_OBJ}")
# Normalize the name of the resource
foreach(RESOURCE_FILE ${EMBED_RESOURCES})
set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
set(BINARY_FILE_NAME "${RESOURCE_FILE}")
# Normalize the name of the resource.
string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex
string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}")
set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
# Put the configured assembly file in the output directory.
# This is so we can clean it up as usual, and we CD to the
# source directory before compiling, so that the assembly
# `.incbin` directive can find the file.
# Generate the configured assembly file in the output directory.
configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY)
# Generate the output object file by compiling the assembly, in the directory of
# the sources so that the resource file may also be found
add_custom_command(
OUTPUT ${RESOURCE_OBJ}
COMMAND cd "${EMBED_RESOURCE_DIR}" &&
${CMAKE_C_COMPILER} "${CROSS_COMPILE_FLAGS}" -c -o
"${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}"
"${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}"
)
set_source_files_properties("${RESOURCE_OBJ}" PROPERTIES EXTERNAL_OBJECT true GENERATED true)
endforeach()
# Set the include directory for relative paths specified for `.incbin` directive.
set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}")
add_library("${EMBED_TARGET}" STATIC ${RESOURCE_OBJS})
set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}")
endforeach()
endmacro()


@ -4,7 +4,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc")
get_filename_component (TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" REALPATH)
# We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9.
set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE)


@ -34,7 +34,6 @@ endif()
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
add_subdirectory (abseil-cpp-cmake)
add_subdirectory (antlr4-runtime-cmake)
add_subdirectory (boost-cmake)
add_subdirectory (cctz-cmake)
add_subdirectory (consistent-hashing)

@ -1 +0,0 @@
Subproject commit 672643e9a427ef803abf13bc8cb4989606553d64


@ -1,156 +0,0 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime")
set (SRCS
"${LIBRARY_DIR}/ANTLRErrorListener.cpp"
"${LIBRARY_DIR}/ANTLRErrorStrategy.cpp"
"${LIBRARY_DIR}/ANTLRFileStream.cpp"
"${LIBRARY_DIR}/ANTLRInputStream.cpp"
"${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp"
"${LIBRARY_DIR}/atn/ActionTransition.cpp"
"${LIBRARY_DIR}/atn/AmbiguityInfo.cpp"
"${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp"
"${LIBRARY_DIR}/atn/ATN.cpp"
"${LIBRARY_DIR}/atn/ATNConfig.cpp"
"${LIBRARY_DIR}/atn/ATNConfigSet.cpp"
"${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp"
"${LIBRARY_DIR}/atn/ATNDeserializer.cpp"
"${LIBRARY_DIR}/atn/ATNSerializer.cpp"
"${LIBRARY_DIR}/atn/ATNSimulator.cpp"
"${LIBRARY_DIR}/atn/ATNState.cpp"
"${LIBRARY_DIR}/atn/AtomTransition.cpp"
"${LIBRARY_DIR}/atn/BasicBlockStartState.cpp"
"${LIBRARY_DIR}/atn/BasicState.cpp"
"${LIBRARY_DIR}/atn/BlockEndState.cpp"
"${LIBRARY_DIR}/atn/BlockStartState.cpp"
"${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionEventInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionInfo.cpp"
"${LIBRARY_DIR}/atn/DecisionState.cpp"
"${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp"
"${LIBRARY_DIR}/atn/EpsilonTransition.cpp"
"${LIBRARY_DIR}/atn/ErrorInfo.cpp"
"${LIBRARY_DIR}/atn/LexerAction.cpp"
"${LIBRARY_DIR}/atn/LexerActionExecutor.cpp"
"${LIBRARY_DIR}/atn/LexerATNConfig.cpp"
"${LIBRARY_DIR}/atn/LexerATNSimulator.cpp"
"${LIBRARY_DIR}/atn/LexerChannelAction.cpp"
"${LIBRARY_DIR}/atn/LexerCustomAction.cpp"
"${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp"
"${LIBRARY_DIR}/atn/LexerModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerMoreAction.cpp"
"${LIBRARY_DIR}/atn/LexerPopModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerPushModeAction.cpp"
"${LIBRARY_DIR}/atn/LexerSkipAction.cpp"
"${LIBRARY_DIR}/atn/LexerTypeAction.cpp"
"${LIBRARY_DIR}/atn/LL1Analyzer.cpp"
"${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp"
"${LIBRARY_DIR}/atn/LoopEndState.cpp"
"${LIBRARY_DIR}/atn/NotSetTransition.cpp"
"${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp"
"${LIBRARY_DIR}/atn/ParseInfo.cpp"
"${LIBRARY_DIR}/atn/ParserATNSimulator.cpp"
"${LIBRARY_DIR}/atn/PlusBlockStartState.cpp"
"${LIBRARY_DIR}/atn/PlusLoopbackState.cpp"
"${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp"
"${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp"
"${LIBRARY_DIR}/atn/PredicateTransition.cpp"
"${LIBRARY_DIR}/atn/PredictionContext.cpp"
"${LIBRARY_DIR}/atn/PredictionMode.cpp"
"${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp"
"${LIBRARY_DIR}/atn/RangeTransition.cpp"
"${LIBRARY_DIR}/atn/RuleStartState.cpp"
"${LIBRARY_DIR}/atn/RuleStopState.cpp"
"${LIBRARY_DIR}/atn/RuleTransition.cpp"
"${LIBRARY_DIR}/atn/SemanticContext.cpp"
"${LIBRARY_DIR}/atn/SetTransition.cpp"
"${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp"
"${LIBRARY_DIR}/atn/StarBlockStartState.cpp"
"${LIBRARY_DIR}/atn/StarLoopbackState.cpp"
"${LIBRARY_DIR}/atn/StarLoopEntryState.cpp"
"${LIBRARY_DIR}/atn/TokensStartState.cpp"
"${LIBRARY_DIR}/atn/Transition.cpp"
"${LIBRARY_DIR}/atn/WildcardTransition.cpp"
"${LIBRARY_DIR}/BailErrorStrategy.cpp"
"${LIBRARY_DIR}/BaseErrorListener.cpp"
"${LIBRARY_DIR}/BufferedTokenStream.cpp"
"${LIBRARY_DIR}/CharStream.cpp"
"${LIBRARY_DIR}/CommonToken.cpp"
"${LIBRARY_DIR}/CommonTokenFactory.cpp"
"${LIBRARY_DIR}/CommonTokenStream.cpp"
"${LIBRARY_DIR}/ConsoleErrorListener.cpp"
"${LIBRARY_DIR}/DefaultErrorStrategy.cpp"
"${LIBRARY_DIR}/dfa/DFA.cpp"
"${LIBRARY_DIR}/dfa/DFASerializer.cpp"
"${LIBRARY_DIR}/dfa/DFAState.cpp"
"${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp"
"${LIBRARY_DIR}/DiagnosticErrorListener.cpp"
"${LIBRARY_DIR}/Exceptions.cpp"
"${LIBRARY_DIR}/FailedPredicateException.cpp"
"${LIBRARY_DIR}/InputMismatchException.cpp"
"${LIBRARY_DIR}/InterpreterRuleContext.cpp"
"${LIBRARY_DIR}/IntStream.cpp"
"${LIBRARY_DIR}/Lexer.cpp"
"${LIBRARY_DIR}/LexerInterpreter.cpp"
"${LIBRARY_DIR}/LexerNoViableAltException.cpp"
"${LIBRARY_DIR}/ListTokenSource.cpp"
"${LIBRARY_DIR}/misc/InterpreterDataReader.cpp"
"${LIBRARY_DIR}/misc/Interval.cpp"
"${LIBRARY_DIR}/misc/IntervalSet.cpp"
"${LIBRARY_DIR}/misc/MurmurHash.cpp"
"${LIBRARY_DIR}/misc/Predicate.cpp"
"${LIBRARY_DIR}/NoViableAltException.cpp"
"${LIBRARY_DIR}/Parser.cpp"
"${LIBRARY_DIR}/ParserInterpreter.cpp"
"${LIBRARY_DIR}/ParserRuleContext.cpp"
"${LIBRARY_DIR}/ProxyErrorListener.cpp"
"${LIBRARY_DIR}/RecognitionException.cpp"
"${LIBRARY_DIR}/Recognizer.cpp"
"${LIBRARY_DIR}/RuleContext.cpp"
"${LIBRARY_DIR}/RuleContextWithAltNum.cpp"
"${LIBRARY_DIR}/RuntimeMetaData.cpp"
"${LIBRARY_DIR}/support/Any.cpp"
"${LIBRARY_DIR}/support/Arrays.cpp"
"${LIBRARY_DIR}/support/CPPUtils.cpp"
"${LIBRARY_DIR}/support/guid.cpp"
"${LIBRARY_DIR}/support/StringUtils.cpp"
"${LIBRARY_DIR}/Token.cpp"
"${LIBRARY_DIR}/TokenSource.cpp"
"${LIBRARY_DIR}/TokenStream.cpp"
"${LIBRARY_DIR}/TokenStreamRewriter.cpp"
"${LIBRARY_DIR}/tree/ErrorNode.cpp"
"${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp"
"${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp"
"${LIBRARY_DIR}/tree/ParseTree.cpp"
"${LIBRARY_DIR}/tree/ParseTreeListener.cpp"
"${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp"
"${LIBRARY_DIR}/tree/ParseTreeWalker.cpp"
"${LIBRARY_DIR}/tree/pattern/Chunk.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp"
"${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp"
"${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp"
"${LIBRARY_DIR}/tree/pattern/TagChunk.cpp"
"${LIBRARY_DIR}/tree/pattern/TextChunk.cpp"
"${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp"
"${LIBRARY_DIR}/tree/TerminalNode.cpp"
"${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp"
"${LIBRARY_DIR}/tree/Trees.cpp"
"${LIBRARY_DIR}/tree/xpath/XPath.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp"
"${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp"
"${LIBRARY_DIR}/UnbufferedCharStream.cpp"
"${LIBRARY_DIR}/UnbufferedTokenStream.cpp"
"${LIBRARY_DIR}/Vocabulary.cpp"
"${LIBRARY_DIR}/WritableToken.cpp"
)
add_library (antlr4-runtime ${SRCS})
target_include_directories (antlr4-runtime SYSTEM PUBLIC ${LIBRARY_DIR})

View File

@ -26,7 +26,7 @@ if (NOT USE_INTERNAL_CCTZ_LIBRARY)
set_property (TARGET cctz PROPERTY IMPORTED_LOCATION ${LIBRARY_CCTZ})
set_property (TARGET cctz PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_CCTZ})
endif()
set(SYSTEM_STORAGE_TZ_FILE "${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")

contrib/libpq

@ -1 +1 @@
Subproject commit c7624588ddd84f153dd5990e81b886e4568bddde
Subproject commit e071ea570f8985aa00e34f5b9d50a3cfe666327e


@ -8,7 +8,7 @@ set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol2.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
@ -18,8 +18,12 @@ set(SRCS
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${LIBPQ_SOURCE_DIR}/common/scram-common.c"
"${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/sha2.c"
"${LIBPQ_SOURCE_DIR}/common/sha1.c"
"${LIBPQ_SOURCE_DIR}/common/md5.c"
"${LIBPQ_SOURCE_DIR}/common/md5_common.c"
"${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/cryptohash.c"
"${LIBPQ_SOURCE_DIR}/common/saslprep.c"
"${LIBPQ_SOURCE_DIR}/common/unicode_norm.c"
"${LIBPQ_SOURCE_DIR}/common/ip.c"

contrib/libunwind

@ -1 +1 @@
Subproject commit a491c27b33109a842d577c0f7ac5f5f218859181
Subproject commit 6b816d2fba3991f8fd6aaec17d92f68947eab667


@ -1,7 +1,7 @@
add_library(murmurhash
src/murmurhash2.cpp
src/murmurhash3.cpp
include/murmurhash2.h
include/murmurhash3.h)
src/MurmurHash2.cpp
src/MurmurHash3.cpp
include/MurmurHash2.h
include/MurmurHash3.h)
target_include_directories (murmurhash PUBLIC include)


@ -0,0 +1,49 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#ifndef MURMURHASH2_H
#define MURMURHASH2_H
#include <stddef.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed );
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed );
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed );
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed );
#ifdef __cplusplus
}
#endif
//-----------------------------------------------------------------------------
#endif // MURMURHASH2_H
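A minimal usage sketch for the declarations above, assuming the header is compiled together with MurmurHash2.cpp; the key string and seed are illustrative:

#include "MurmurHash2.h"
#include <cstring>
#include <iostream>

int main()
{
    const char * key = "ClickHouse";
    /// 32-bit and 64-bit variants; the seed perturbs the whole hash value.
    uint32_t h32 = MurmurHash2(key, std::strlen(key), /* seed = */ 0);
    uint64_t h64 = MurmurHash64A(key, std::strlen(key), /* seed = */ 0);
    std::cout << std::hex << h32 << ' ' << h64 << '\n';
}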


@ -2,7 +2,10 @@
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#pragma once
#ifndef MURMURHASH3_H
#define MURMURHASH3_H
#include <stddef.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
@ -23,20 +26,22 @@ typedef unsigned __int64 uint64_t;
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
//-----------------------------------------------------------------------------
void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out );
void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out );
void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out );
void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
//-----------------------------------------------------------------------------
void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out );
#ifdef __cplusplus
}
#endif
//-----------------------------------------------------------------------------
#endif // MURMURHASH3_H
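Similarly, a short sketch for the 128-bit variant declared above; the out parameter receives 16 bytes, and the key and seed are again illustrative:

#include "MurmurHash3.h"
#include <cstdint>
#include <cstring>
#include <iostream>

int main()
{
    const char * key = "ClickHouse";
    uint64_t out[2]; /// 128-bit result, written through the out pointer
    MurmurHash3_x64_128(key, std::strlen(key), /* seed = */ 0, out);
    std::cout << std::hex << out[0] << ' ' << out[1] << '\n';
}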


@ -1,31 +0,0 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#pragma once
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2 (const void * key, int len, uint32_t seed);
uint64_t MurmurHash64A (const void * key, int len, uint64_t seed);
uint64_t MurmurHash64B (const void * key, int len, uint64_t seed);
uint32_t MurmurHash2A (const void * key, int len, uint32_t seed);
uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed);
uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed);


@ -0,0 +1,523 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "MurmurHash2.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed )
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed )
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = (const uint64_t *)key;
const uint64_t * end = data + (len/8);
while(data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = (const unsigned char*)data;
switch(len & 7)
{
case 7: h ^= uint64_t(data2[6]) << 48;
case 6: h ^= uint64_t(data2[5]) << 40;
case 5: h ^= uint64_t(data2[4]) << 32;
case 4: h ^= uint64_t(data2[3]) << 24;
case 3: h ^= uint64_t(data2[2]) << 16;
case 2: h ^= uint64_t(data2[1]) << 8;
case 1: h ^= uint64_t(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = uint32_t(seed) ^ len;
uint32_t h2 = uint32_t(seed >> 32);
const uint32_t * data = (const uint32_t *)key;
while(len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if(len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch(len)
{
case 3: h2 ^= ((unsigned char*)data)[2] << 16;
case 2: h2 ^= ((unsigned char*)data)[1] << 8;
case 1: h2 ^= ((unsigned char*)data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch(len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// CMurmurHash2A, by Austin Appleby
// This is a sample implementation of MurmurHash2A designed to work
// incrementally.
// Usage -
// CMurmurHash2A hasher
// hasher.Begin(seed);
// hasher.Add(data1,size1);
// hasher.Add(data2,size2);
// ...
// hasher.Add(dataN,sizeN);
// uint32_t hash = hasher.End()
class CMurmurHash2A
{
public:
void Begin ( uint32_t seed = 0 )
{
m_hash = seed;
m_tail = 0;
m_count = 0;
m_size = 0;
}
void Add ( const unsigned char * data, size_t len )
{
m_size += len;
MixTail(data,len);
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(m_hash,k);
data += 4;
len -= 4;
}
MixTail(data,len);
}
uint32_t End ( void )
{
mmix(m_hash,m_tail);
mmix(m_hash,m_size);
m_hash ^= m_hash >> 13;
m_hash *= m;
m_hash ^= m_hash >> 15;
return m_hash;
}
private:
static const uint32_t m = 0x5bd1e995;
static const int r = 24;
void MixTail ( const unsigned char * & data, size_t & len )
{
while( len && ((len<4) || m_count) )
{
m_tail |= (*data++) << (m_count * 8);
m_count++;
len--;
if(m_count == 4)
{
mmix(m_hash,m_tail);
m_tail = 0;
m_count = 0;
}
}
}
uint32_t m_hash;
uint32_t m_tail;
uint32_t m_count;
uint32_t m_size;
};
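A short sketch of the incremental usage outlined in the comment above, assuming it lives in the same translation unit as these definitions; feeding the data in chunks is expected to match the one-shot MurmurHash2A result, since CMurmurHash2A is its incremental counterpart:

#include <cstddef>
#include <iostream>

void murmur2a_incremental_demo()
{
    const unsigned char data[] = "incremental";
    const size_t n = sizeof(data) - 1; /// exclude the trailing '\0'

    CMurmurHash2A hasher;
    hasher.Begin(/* seed = */ 0);
    hasher.Add(data, 5);         /// feed the input in two chunks
    hasher.Add(data + 5, n - 5);
    uint32_t incremental = hasher.End();

    uint32_t oneshot = MurmurHash2A(data, n, /* seed = */ 0);
    std::cout << incremental << ' ' << oneshot << '\n';
}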
//-----------------------------------------------------------------------------
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed ^ len;
size_t align = (uint64_t)data & 3;
if(align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch(align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while(len >= 4)
{
d = *(uint32_t *)data;
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if(len >= align)
{
switch(align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch(len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while(len >= 4)
{
uint32_t k = *(uint32_t *)data;
MIX(h,k,m);
data += 4;
len -= 4;
}
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}
//-----------------------------------------------------------------------------


@ -1,3 +1,4 @@
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
@ -6,8 +7,8 @@
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
#include "murmurhash3.h"
#include <cstring>
#include "MurmurHash3.h"
#include <string.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
@ -93,7 +94,7 @@ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
//-----------------------------------------------------------------------------
void MurmurHash3_x86_32 ( const void * key, int len,
void MurmurHash3_x86_32 ( const void * key, size_t len,
uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -149,7 +150,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,
//-----------------------------------------------------------------------------
void MurmurHash3_x86_128 ( const void * key, const int len,
void MurmurHash3_x86_128 ( const void * key, const size_t len,
uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -254,7 +255,7 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
//-----------------------------------------------------------------------------
void MurmurHash3_x64_128 ( const void * key, const int len,
void MurmurHash3_x64_128 ( const void * key, const size_t len,
const uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -332,3 +333,6 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
((uint64_t*)out)[0] = h1;
((uint64_t*)out)[1] = h2;
}
//-----------------------------------------------------------------------------


@ -1,423 +0,0 @@
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "murmurhash2.h"
#include <cstring>
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2(const void * key, int len, uint32_t seed)
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
memcpy(&k, data, sizeof(k));
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A(const void * key, int len, uint64_t seed)
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = reinterpret_cast<const uint64_t *>(key);
const uint64_t * end = data + (len/8);
while (data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = reinterpret_cast<const unsigned char *>(data);
switch (len & 7)
{
case 7: h ^= static_cast<uint64_t>(data2[6]) << 48;
case 6: h ^= static_cast<uint64_t>(data2[5]) << 40;
case 5: h ^= static_cast<uint64_t>(data2[4]) << 32;
case 4: h ^= static_cast<uint64_t>(data2[3]) << 24;
case 3: h ^= static_cast<uint64_t>(data2[2]) << 16;
case 2: h ^= static_cast<uint64_t>(data2[1]) << 8;
case 1: h ^= static_cast<uint64_t>(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B(const void * key, int len, uint64_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = static_cast<uint32_t>(seed) ^ len;
uint32_t h2 = static_cast<uint32_t>(seed >> 32);
const uint32_t * data = reinterpret_cast<const uint32_t *>(key);
while (len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if (len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch (len)
{
case 3: h2 ^= reinterpret_cast<const unsigned char *>(data)[2] << 16;
case 2: h2 ^= reinterpret_cast<const unsigned char *>(data)[1] << 8;
case 1: h2 ^= reinterpret_cast<const unsigned char *>(data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed;
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch (len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed ^ len;
int align = reinterpret_cast<uint64_t>(data) & 3;
if (align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch (align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while (len >= 4)
{
d = *(reinterpret_cast<const uint32_t *>(data));
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if (len >= align)
{
switch (align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch (len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
MIX(h,k,m);
data += 4;
len -= 4;
}
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}

debian/changelog

@ -1,5 +1,5 @@
clickhouse (21.7.1.1) unstable; urgency=low
clickhouse (21.8.1.1) unstable; urgency=low
* Modified source code
-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 20 May 2021 22:23:29 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 28 Jun 2021 00:50:15 +0300


@ -43,29 +43,6 @@ command -v flock >/dev/null && FLOCK=flock
# Override defaults from optional config file
test -f /etc/default/clickhouse && . /etc/default/clickhouse
# On x86_64, check for required instruction set.
if uname -mpi | grep -q 'x86_64'; then
if ! grep -q 'sse4_2' /proc/cpuinfo; then
# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check.
if ! grep -q 'Common KVM processor' /proc/cpuinfo; then
# Some other VMs also report wrong flags in cpuinfo.
# Tricky way to test for instruction set:
# create temporary binary and run it;
# if it get caught illegal instruction signal,
# then required instruction set is not supported really.
#
# Generated this way:
# gcc -xc -Os -static -nostdlib - <<< 'void _start() { __asm__("pcmpgtq %%xmm0, %%xmm1; mov $0x3c, %%rax; xor %%rdi, %%rdi; syscall":::"memory"); }' && strip -R .note.gnu.build-id -R .comment -R .eh_frame -s ./a.out && gzip -c -9 ./a.out | base64 -w0; echo
if ! (echo -n 'H4sICAwAW1cCA2Eub3V0AKt39XFjYmRkgAEmBjsGEI+H0QHMd4CKGyCUAMUsGJiBJDNQNUiYlQEZOKDQclB9cnD9CmCSBYqJBRxQOvBpSQobGfqIAWn8FuYnPI4fsAGyPQz/87MeZtArziguKSpJTGLQK0mtKGGgGHADMSgoYH6AhTMPNHyE0NQzYuEzYzEXFr6CBPQDANAsXKTwAQAA' | base64 -d | gzip -d > /tmp/clickhouse_test_sse42 && chmod a+x /tmp/clickhouse_test_sse42 && /tmp/clickhouse_test_sse42); then
echo 'Warning! SSE 4.2 instruction set is not supported'
#exit 3
fi
fi
fi
fi
die()
{
@ -116,7 +93,7 @@ forcestop()
service_or_func()
{
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
service $PROGRAM $1
systemctl $1 $PROGRAM
else
$1
fi

View File

@ -12,7 +12,6 @@ mkdir root
pushd root
mkdir lib lib64 etc tmp root
cp ${BUILD_DIR}/programs/clickhouse .
cp ${SRC_DIR}/programs/server/{config,users}.xml .
cp /lib/x86_64-linux-gnu/{libc.so.6,libdl.so.2,libm.so.6,libpthread.so.0,librt.so.1,libnss_dns.so.2,libresolv.so.2} lib
cp /lib64/ld-linux-x86-64.so.2 lib64
cp /etc/resolv.conf ./etc

View File

@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.7.1.*
ARG version=21.8.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \

View File

@ -72,7 +72,7 @@ RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
&& cd .. \
&& rm -rf apple-libtapi
# Build and install tools for cross-linking to Darwin
# Build and install tools for cross-linking to Darwin (x86-64)
RUN git clone https://github.com/tpoechtrager/cctools-port.git \
&& cd cctools-port/cctools \
&& ./configure --prefix=/cctools --with-libtapi=/cctools \
@ -81,8 +81,17 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
&& cd ../.. \
&& rm -rf cctools-port
# Download toolchain for Darwin
RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz
# Build and install tools for cross-linking to Darwin (aarch64)
RUN git clone https://github.com/tpoechtrager/cctools-port.git \
&& cd cctools-port/cctools \
&& ./configure --prefix=/cctools --with-libtapi=/cctools \
--target=aarch64-apple-darwin \
&& make install \
&& cd ../.. \
&& rm -rf cctools-port
# Download toolchain and SDK for Darwin
RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz
# Download toolchain for ARM
# It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.

View File

@ -3,7 +3,9 @@
set -x -e
mkdir -p build/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
mkdir -p build/cmake/toolchain/linux-aarch64
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1

View File

@ -58,6 +58,7 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache
def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries):
CLANG_PREFIX = "clang"
DARWIN_SUFFIX = "-darwin"
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
ARM_SUFFIX = "-aarch64"
FREEBSD_SUFFIX = "-freebsd"
@ -66,9 +67,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
is_clang = compiler.startswith(CLANG_PREFIX)
is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
is_cross_arm = compiler.endswith(ARM_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd
is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd
# Explicitly use LLD with Clang by default.
# Don't force linker for cross-compilation.
@ -82,6 +84,13 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib")
cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld")
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake")
elif is_cross_darwin_arm:
cc = compiler[:-len(DARWIN_ARM_SUFFIX)]
cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar")
cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool")
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib")
cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld")
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake")
elif is_cross_arm:
cc = compiler[:-len(ARM_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")
@ -185,8 +194,8 @@ if __name__ == "__main__":
parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
parser.add_argument("--output-dir", required=True)
parser.add_argument("--build-type", choices=("debug", ""), default="")
parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd",
"gcc-10"), default="clang-11")
parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
"clang-11-freebsd", "gcc-10"), default="clang-11")
parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
parser.add_argument("--unbundled", action="store_true")
parser.add_argument("--split-binary", action="store_true")

View File

@ -1,7 +1,7 @@
FROM ubuntu:20.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.7.1.*
ARG version=21.8.1.*
ARG gosu_ver=1.10
# set non-empty deb_location_url url to create a docker image

View File

@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.7.1.*
ARG version=21.8.1.*
RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \

View File

@ -46,6 +46,7 @@ RUN apt-get update \
pigz \
pkg-config \
tzdata \
pv \
--yes --no-install-recommends
# Sanitizer options for services (clickhouse-server)

View File

@ -113,6 +113,7 @@ function start_server
echo "ClickHouse server pid '$server_pid' started and responded"
echo "
set follow-fork-mode child
handle all noprint
handle SIGSEGV stop print
handle SIGBUS stop print
@ -159,7 +160,6 @@ function clone_submodules
SUBMODULES_TO_UPDATE=(
contrib/abseil-cpp
contrib/antlr4-runtime
contrib/boost
contrib/zlib-ng
contrib/libxml2
@ -373,14 +373,11 @@ function run_tests
# Depends on AWS
01801_s3_cluster
# Depends on LLVM JIT
01072_nullable_jit
01852_jit_if
01865_jit_comparison_constant_result
01871_merge_tree_compile_expressions
# needs psql
01889_postgresql_protocol_null_fields
# needs pv
01923_network_receive_time_metric_insert
)
time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \

View File

@ -103,6 +103,7 @@ function fuzz
kill -0 $server_pid
echo "
set follow-fork-mode child
handle all noprint
handle SIGSEGV stop print
handle SIGBUS stop print

View File

@ -1,6 +1,8 @@
# docker build -t yandex/clickhouse-integration-test .
FROM yandex/clickhouse-test-base
SHELL ["/bin/bash", "-c"]
RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
tzdata \
@ -20,7 +22,9 @@ RUN apt-get update \
krb5-user \
iproute2 \
lsof \
g++
g++ \
default-jre
RUN rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
@ -30,6 +34,19 @@ RUN apt-get clean
# Install MySQL ODBC driver
RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
# Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper.
# ZooKeeper is not started by default, but consumes some space in containers.
# 777 perms used to allow anybody to start/stop ZooKeeper
ENV ZOOKEEPER_VERSION='3.6.3'
RUN curl -O "https://mirrors.estointernet.in/apache/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz
RUN echo $'tickTime=2500 \n\
tickTime=2500 \n\
dataDir=/zookeeper \n\
clientPort=2181 \n\
maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
RUN mkdir /zookeeper && chmod -R 777 /zookeeper
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

View File

@ -11,6 +11,7 @@ services:
interval: 10s
timeout: 5s
retries: 5
command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"]
networks:
default:
aliases:
@ -22,4 +23,4 @@ services:
volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES_DIR:-}
target: /postgres/
target: /postgres/

View File

@ -319,14 +319,14 @@ function get_profiles
wait
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
@ -409,10 +409,10 @@ create view right_query_log as select *
'$(cat "right-query-log.tsv.columns")');
create view query_logs as
select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
select 0 version, query_id, ProfileEvents,
query_duration_ms, memory_usage from left_query_log
union all
select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
select 1 version, query_id, ProfileEvents,
query_duration_ms, memory_usage from right_query_log
;
@ -424,7 +424,7 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
with (
-- sumMapState with the list of all keys with '-0.' values. Negative zero is because
-- sumMap removes keys with positive zeros.
with (select groupUniqArrayArray(ProfileEvents.Names) from query_logs) as all_names
with (select groupUniqArrayArray(mapKeys(ProfileEvents)) from query_logs) as all_names
select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
) as all_metrics
select test, query_index, version, query_id,
@ -433,8 +433,8 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
[
all_metrics,
arrayReduce('sumMapState',
[(ProfileEvents.Names,
arrayMap(x->toFloat64(x), ProfileEvents.Values))]
[(mapKeys(ProfileEvents),
arrayMap(x->toFloat64(x), mapValues(ProfileEvents)))]
),
arrayReduce('sumMapState', [(
['client_time', 'server_time', 'memory_usage'],
@ -1003,10 +1003,11 @@ create view query_log as select *
create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
'unstable-run-metrics.$version.rep') as
select
test, query_index, query_id,
ProfileEvents.Values value, ProfileEvents.Names metric
from query_log array join ProfileEvents
select test, query_index, query_id, value, metric
from query_log
array join
mapValues(ProfileEvents) as value,
mapKeys(ProfileEvents) as metric
join unstable_query_runs using (query_id)
;
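These changes follow the new `system.query_log` layout, where `ProfileEvents` is a single Map column rather than the Nested `ProfileEvents.Names`/`ProfileEvents.Values` pair; roughly, it can be read like this:

```sql
SELECT query_id,
       mapKeys(ProfileEvents) AS metric_names,
       mapValues(ProfileEvents) AS metric_values
FROM system.query_log
WHERE type = 'QueryFinish'
LIMIT 1;
```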
@ -1280,7 +1281,7 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
then
echo Database for test results is not specified, will not upload them.
return 0
fi
fi
set +x # Don't show password in the log
client=(clickhouse-client

View File

@ -561,7 +561,7 @@ if args.report == 'main':
# Don't show mildly unstable queries, only the very unstable ones we
# treat as errors.
if very_unstable_queries:
if very_unstable_queries > 3:
if very_unstable_queries > 5:
error_tests += very_unstable_queries
status = 'failure'
message_array.append(str(very_unstable_queries) + ' unstable')

View File

@ -35,7 +35,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then
# simplest way to forward env variables to the server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
else
service clickhouse-server start
sudo clickhouse start
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@ -1,4 +1,6 @@
#!/bin/bash
# shellcheck disable=SC2094
# shellcheck disable=SC2086
set -x
@ -37,6 +39,17 @@ function stop()
function start()
{
# Rename existing log file - it will be more convenient to read separate files for separate server runs.
if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ]
then
log_file_counter=1
while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ]
do
log_file_counter=$((log_file_counter + 1))
done
mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}"
fi
counter=0
until clickhouse-client --query "SELECT 1"
do
@ -55,6 +68,7 @@ function start()
done
echo "
set follow-fork-mode child
handle all noprint
handle SIGSEGV stop print
handle SIGBUS stop print
@ -140,7 +154,11 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
&& echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv
# Put logs into /test_output/
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz
for log_file in /var/log/clickhouse-server/clickhouse-server.log*
do
pigz < "${log_file}" > /test_output/"$(basename ${log_file})".gz
done
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:

View File

@ -2,18 +2,16 @@
## TL; DR How to make ClickHouse compile and link faster?
Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`.
Minimal ClickHouse build example:
```cmake
```bash
cmake .. \
-DCMAKE_C_COMPILER=/bin/clang-10 \
-DCMAKE_CXX_COMPILER=/bin/clang++-10 \
-DCMAKE_C_COMPILER=$(which clang-11) \
-DCMAKE_CXX_COMPILER=$(which clang++-11) \
-DCMAKE_BUILD_TYPE=Debug \
-DENABLE_CLICKHOUSE_ALL=OFF \
-DENABLE_CLICKHOUSE_SERVER=ON \
-DENABLE_CLICKHOUSE_CLIENT=ON \
-DUSE_STATIC_LIBRARIES=OFF \
-DSPLIT_SHARED_LIBRARIES=ON \
-DENABLE_LIBRARIES=OFF \
-DUSE_UNWIND=ON \
-DENABLE_UTILS=OFF \

View File

@ -0,0 +1,6 @@
# ARM (AArch64) build works on Amazon Graviton, Oracle Cloud, Huawei Cloud ARM machines.
# The support for AArch64 is pre-production ready.
wget 'https://builds.clickhouse.tech/master/aarch64/clickhouse'
chmod a+x ./clickhouse
sudo ./clickhouse install

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/freebsd/clickhouse'
chmod a+x ./clickhouse
sudo ./clickhouse install

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse'
chmod a+x ./clickhouse
./clickhouse

View File

@ -0,0 +1,3 @@
wget 'https://builds.clickhouse.tech/master/macos/clickhouse'
chmod a+x ./clickhouse
./clickhouse

View File

@ -33,7 +33,7 @@ Reboot.
``` bash
brew update
brew install cmake ninja libtool gettext llvm gcc
brew install cmake ninja libtool gettext llvm gcc binutils
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}

View File

@ -49,6 +49,7 @@ When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](.
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| ENUM | [Enum](../../sql-reference/data-types/enum.md) |
| STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |

View File

@ -0,0 +1,71 @@
---
toc_priority: 30
toc_title: MaterializedPostgreSQL
---
# MaterializedPostgreSQL {#materialize-postgresql}
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE test_database
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password');

SELECT * FROM test_database.postgres_table;
```
## Settings {#settings}
1. `materialized_postgresql_max_block_size` - Number of rows collected before flushing data into a table. Default: `65536`.
2. `materialized_postgresql_tables_list` - List of tables for the MaterializedPostgreSQL database engine. Default: `whole database`.
3. `materialized_postgresql_allow_automatic_update` - Allows reloading a table in the background when schema changes are detected. Default: `0` (`false`).
``` sql
CREATE DATABASE test_database
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
SETTINGS materialized_postgresql_max_block_size = 65536,
materialized_postgresql_tables_list = 'table1,table2,table3';
SELECT * FROM test_database.table1;
```
## Requirements {#requirements}
- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the PostgreSQL config file.
- Each replicated table must have one of the following **replica identity**:
1. **default** (primary key)
2. **index**
``` bash
postgres# CREATE TABLE postgres_table (a Integer NOT NULL, b Integer, c Integer NOT NULL, d Integer, e Integer NOT NULL);
postgres# CREATE unique INDEX postgres_table_index on postgres_table(a, c, e);
postgres# ALTER TABLE postgres_table REPLICA IDENTITY USING INDEX postgres_table_index;
```
The primary key is always checked first. If it is absent, the index defined as the replica identity index is checked.
If an index is used as the replica identity, there must be exactly one such index in the table.
You can check what type is used for a specific table with the following command:
``` bash
postgres# SELECT CASE relreplident
WHEN 'd' THEN 'default'
WHEN 'n' THEN 'nothing'
WHEN 'f' THEN 'full'
WHEN 'i' THEN 'index'
END AS replica_identity
FROM pg_class
WHERE oid = 'postgres_table'::regclass;
```
## Warning {#warning}
1. **TOAST** values conversion is not supported. The default value for the data type will be used.

View File

@ -0,0 +1,53 @@
---
toc_priority: 12
toc_title: ExternalDistributed
---
# ExternalDistributed {#externaldistributed}
The `ExternalDistributed` engine allows performing `SELECT` queries on data stored on remote MySQL or PostgreSQL servers. It accepts the [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engine as an argument, so sharding is possible.
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password');
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the original table structure:
- Column names should be the same as in the original table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
**Engine Parameters**
- `engine` — The table engine `MySQL` or `PostgreSQL`.
- `host:port` — MySQL or PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — User name.
- `password` — User password.
## Implementation Details {#implementation-details}
Supports multiple replicas that must be listed by `|` and shards must be listed by `,`. For example:
```sql
CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
```
When specifying replicas, one of the available replicas is selected for each of the shards when reading. If the connection fails, the next replica is selected, and so on for all the replicas. If the connection attempt fails for all the replicas, the attempt is repeated the same way several times.
You can specify any number of shards and any number of replicas for each shard.
**See Also**
- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
- [Distributed table engine](../../../engines/table-engines/special/distributed.md)

View File

@ -0,0 +1,46 @@
---
toc_priority: 12
toc_title: MaterializedPostgreSQL
---
# MaterializedPostgreSQL {#materialize-postgresql}
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
```
## Requirements {#requirements}
- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the PostgreSQL config file.
- A table with engine `MaterializedPostgreSQL` must have a primary key - the same as a replica identity index (default: primary key) of a postgres table (See [details on replica identity index](../../database-engines/materialized-postgresql.md#requirements)).
- Only database `Atomic` is allowed.
## Virtual columns {#creating-a-table}
- `_version` (`UInt64`)
- `_sign` (`Int8`)
These columns do not need to be added when the table is created. They are always accessible in a `SELECT` query.
The `_version` column equals the `LSN` position in the `WAL`, so it can be used to check how up to date the replication is.
``` sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
SELECT key, value, _version FROM test.postgresql_replica;
```
## Warning {#warning}
1. **TOAST** values conversion is not supported. The default value for the data type will be used.

View File

@ -28,8 +28,8 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen
The table structure can differ from the original MySQL table structure:
- Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.
**Engine Parameters**
@ -55,6 +55,12 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL s
The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
Supports multiple replicas that must be listed by `|`. For example:
```sql
CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
```
## Usage Example {#usage-example}
Table in MySQL:

View File

@ -29,7 +29,7 @@ The table structure can differ from the source table structure:
- Column names should be the same as in the source table, but you can use just some of these columns and in any order.
- Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.
**Engine Parameters**

View File

@ -23,8 +23,8 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen
The table structure can differ from the original PostgreSQL table structure:
- Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.
**Engine Parameters**
@ -49,6 +49,12 @@ PostgreSQL `Array` types are converted into ClickHouse arrays.
!!! info "Note"
Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.
Supports multiple replicas that must be listed by `|`. For example:
```sql
CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
```
Replica priority for the PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`.

View File

@ -65,7 +65,7 @@ By checking the row count:
Query:
``` sq;
``` sql
SELECT count() FROM recipes;
```

View File

@ -94,11 +94,11 @@ For production environments, its recommended to use the latest `stable`-versi
To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use official `deb` packages inside.
### Single Binary
### Single Binary {#from-single-binary}
You can install ClickHouse on Linux using single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse].
You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse].
```
``` bash
curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse
sudo ./clickhouse install
```
@ -107,9 +107,10 @@ sudo ./clickhouse install
For non-Linux operating systems and for the AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours' delay).
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
- [MacOS x86_64](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.tech/master/macos-aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD x86_64](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [Linux AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.

View File

@ -498,7 +498,7 @@ Return a message.
<response_content>Say Hi!</response_content>
</handler>
</rule>
<http_handlers>
</http_handlers>
```
``` bash

View File

@ -110,7 +110,7 @@ toc_title: Adopters
| <a href="https://www.semrush.com/" class="favicon">SEMrush</a> | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
| <a href="https://sentry.io/" class="favicon">Sentry</a> | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
@ -154,5 +154,7 @@ toc_title: Adopters
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
| <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

View File

@ -379,7 +379,7 @@ Default value: `1`.
## insert_null_as_default {#insert_null_as_default}
Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type.
Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type.
If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting.
This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause.
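A minimal sketch of the effect (table and column names are illustrative):

```sql
SET insert_null_as_default = 1;

-- NULLs produced by the SELECT are written as the destination columns' default values
-- instead of raising an exception for non-Nullable columns.
INSERT INTO dest_table SELECT nullable_column FROM source_table;
```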
@ -1182,7 +1182,7 @@ Possible values:
Default value: `1`.
**Additional Info**
**Additional Info**
This setting is useful for replicated tables with a sampling key. A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases:
@ -1194,21 +1194,22 @@ This setting is useful for replicated tables with a sampling key. A query may be
!!! warning "Warning"
This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details.
## compile {#compile}
## compile_expressions {#compile-expressions}
Enable compilation of queries. By default, 0 (disabled).
Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime.
The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
If this portion of the pipeline was compiled, the query may run faster due to the deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
Possible values:
## min_count_to_compile {#min-count-to-compile}
- 0 — Disabled.
- 1 — Enabled.
How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.
If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.
Default value: `1`.
Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they do not use very much space. Old results will be used after server restarts, except in the case of a server upgrade in this case, the old results are deleted.
## min_count_to_compile_expression {#min-count-to-compile-expression}
Minimum number of times the same expression must be executed before it gets compiled.
Default value: `3`.
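A minimal sketch of enabling compilation for a session (the query itself is only an illustration):

```sql
SET compile_expressions = 1;
SET min_count_to_compile_expression = 3;

-- After the same expression has been executed 3 times,
-- subsequent executions may use the JIT-compiled version.
SELECT number * 2 + 1 FROM numbers(1000000) FORMAT Null;
```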
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
@ -1558,7 +1559,7 @@ Possible values:
- 0 — Disabled (final query processing is done on the initiator node).
- 1 - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards.
- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possilbe when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
**Example**
@ -1622,7 +1623,7 @@ Possible values:
Default value: 0
## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shardslrewrite-in}
## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shards-rewrite-in}
Rewrite IN in query for remote shards to exclude values that does not belong to the shard (requires optimize_skip_unused_shards).
@ -1727,6 +1728,28 @@ Possible values:
Default value: 0.
## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}
Enables or disables optimization by transforming some functions to reading subcolumns. This reduces the amount of data to read.
These functions can be transformed:
- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn.
- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn.
- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn.
- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn.
Possible values:
- 0 — Optimization disabled.
- 1 — Optimization enabled.
Default value: `0`.
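For instance, with the optimization enabled, the following query can be served from the `size0` subcolumn instead of reading the whole array (table and column names are illustrative):

```sql
SET optimize_functions_to_subcolumns = 1;

-- length(arr) is rewritten to read the arr.size0 subcolumn only.
SELECT length(arr) FROM table_with_arrays;
```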
## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
- Type: seconds
@ -1802,6 +1825,27 @@ Possible values:
Default value: 0.
## distributed_directory_monitor_split_batch_on_failure {#distributed_directory_monitor_split_batch_on_failure}
Enables/disables splitting batches on failures.
Sometimes sending a particular batch to the remote shard may fail because of a complex downstream pipeline (e.g. a `MATERIALIZED VIEW` with `GROUP BY`) hitting `Memory limit exceeded` or similar errors. In this case, retrying will not help (and distributed sends for the table will get stuck), but sending the files from that batch one by one may let the INSERT succeed.
Setting this option to `1` disables batching for such batches (i.e. it temporarily disables `distributed_directory_monitor_batch_inserts` for failed batches).
Possible values:
- 1 — Enabled.
- 0 — Disabled.
Default value: 0.
!!! note "Note"
This setting also affects broken batches (which may appear because of abnormal server (machine) termination when `fsync_after_insert`/`fsync_directories` are not enabled for the [Distributed](../../engines/table-engines/special/distributed.md) table engine).
!!! warning "Warning"
You should not rely on automatic batch splitting, since this may hurt performance.
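A short sketch of enabling it for a session before inserting into a Distributed table (the table name is hypothetical):

```sql
SET distributed_directory_monitor_split_batch_on_failure = 1;

-- If a batched send to a shard later fails, the files of that batch are retried one by one.
INSERT INTO dist_table VALUES (1, 'a');
```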
## os_thread_priority {#setting-os-thread-priority}
Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
@ -2085,7 +2129,7 @@ Default value: 128.
## background_fetches_pool_size {#background_fetches_pool_size}
Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and cant be changed in a user session. For production usage with frequent small insertions or slow ZooKeeper cluster is recomended to use default value.
Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and cant be changed in a user session. For production usage with frequent small insertions or slow ZooKeeper cluster is recommended to use default value.
Possible values:
@ -2672,7 +2716,7 @@ Default value: `0`.
## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty}
Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility.
It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries.
It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries.
Possible values:
@ -2856,7 +2900,7 @@ Default value: `0`.
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
Possible values:
@ -2962,7 +3006,7 @@ Enables or disables using the original column names instead of aliases in query
Possible values:
- 0 — The column name is substituted with the alias.
- 1 — The column name is not substituted with the alias.
- 1 — The column name is not substituted with the alias.
Default value: `0`.
@ -3075,7 +3119,7 @@ SELECT
sum(a),
sumCount(b).1,
sumCount(b).2,
(sumCount(b).1) / (sumCount(b).2)
(sumCount(b).1) / (sumCount(b).2)
FROM fuse_tbl
```
@ -3144,4 +3188,17 @@ SETTINGS index_granularity = 8192 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
## external_table_functions_use_nulls {#external-table-functions-use-nulls}
Defines how the [mysql](../../sql-reference/table-functions/mysql.md), [postgresql](../../sql-reference/table-functions/postgresql.md) and [odbc](../../sql-reference/table-functions/odbc.md) table functions use Nullable columns.
Possible values:
- 0 — The table function explicitly uses Nullable columns.
- 1 — The table function implicitly uses Nullable columns.
Default value: `1`.
**Usage**
If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
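For example, a sketch with the [mysql](../../sql-reference/table-functions/mysql.md) table function (connection parameters are placeholders):

```sql
SET external_table_functions_use_nulls = 0;

-- Columns are created as non-Nullable; NULL values from MySQL arrive as defaults.
SELECT * FROM mysql('host:3306', 'database', 'table', 'user', 'password');
```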

View File

@ -36,4 +36,4 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
- [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics, calculated periodically in the background.
- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metric_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) <!--hide-->

View File

@ -33,6 +33,6 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metrics) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics) <!--hide-->

View File

@ -68,4 +68,4 @@ estimated_recovery_time: 0
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/clusters) <!--hide-->

View File

@ -69,4 +69,21 @@ is_in_sampling_key: 0
compression_codec:
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns) <!--hide-->
The `system.columns` table contains the following columns (the column type is shown in brackets):
- `database` (String) — Database name.
- `table` (String) — Table name.
- `name` (String) — Column name.
- `type` (String) — Column type.
- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
- `marks_bytes` (UInt64) — The size of marks, in bytes.
- `comment` (String) — Comment on the column, or an empty string if it is not defined.
- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/columns) <!--hide-->

View File

@ -38,4 +38,4 @@ SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova'
│ Olga Khvostikova │
└──────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/contributors) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/contributors) <!--hide-->

View File

@ -8,4 +8,4 @@ Columns:
- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/current-roles) <!--hide-->

View File

@ -0,0 +1,39 @@
# system.data_skipping_indices {#system-data-skipping-indices}
Contains information about existing data skipping indices in all the tables.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of granules in the block.
**Example**
```sql
SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
```
```text
Row 1:
──────
database: default
table: user_actions
name: clicks_idx
type: minmax
expr: clicks
granularity: 1
Row 2:
──────
database: default
table: users
name: contacts_null_idx
type: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
```

View File

@ -33,4 +33,4 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String'
- [Syntax](../../sql-reference/syntax.md) — Information about supported syntax.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/data_type_families) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/data_type_families) <!--hide-->

View File

@ -35,4 +35,4 @@ SELECT * FROM system.databases
└────────────────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/databases) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/databases) <!--hide-->

View File

@ -8,4 +8,4 @@ For the description of other columns, see [system.parts](../../operations/system
If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/partition.md#alter_drop-detached).
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/detached_parts) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/detached_parts) <!--hide-->

View File

@ -61,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/dictionaries) <!--hide-->

View File

@ -10,9 +10,6 @@ Columns:
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/disks) <!--hide-->
**Example**
```sql
@ -27,5 +24,4 @@ Columns:
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) <!--hide-->

View File

@ -9,4 +9,4 @@ Columns:
- `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user.
- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/enabled-roles) <!--hide-->

View File

@ -31,4 +31,4 @@ SELECT * FROM system.events LIMIT 5
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/events) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/events) <!--hide-->

View File

@ -7,8 +7,6 @@ Columns:
- `name`(`String`) The name of the function.
- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/functions) <!--hide-->
**Example**
```sql
@ -30,4 +28,6 @@ Columns:
└──────────────────────────┴──────────────┴──────────────────┴──────────┘
10 rows in set. Elapsed: 0.002 sec.
```
```
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) <!--hide-->

View File

@ -21,4 +21,4 @@ Columns:
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax).
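A row with `grant_option = 1` corresponds to a grant issued along the lines of the following sketch; the database and user names are hypothetical:

```sql
-- Hypothetical objects; the WITH GRANT OPTION clause is what sets grant_option = 1.
GRANT SELECT ON db1.* TO user1 WITH GRANT OPTION;
```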
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/grants) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/grants) <!--hide-->

View File

@ -14,4 +14,4 @@ Columns:
- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/graphite_retentions) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/graphite_retentions) <!--hide-->

View File

@ -36,4 +36,4 @@ SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/licenses) <!--hide-->

View File

@ -51,4 +51,4 @@ type: SettingUInt64
4 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merge_tree_settings) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) <!--hide-->

View File

@ -22,4 +22,4 @@ Columns:
- `merge_type` — The type of the current merge. Empty if it's a mutation.
- `merge_algorithm` — The algorithm used in the current merge. Empty if it's a mutation.
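As a quick illustration, currently running merges (including these two columns) can be inspected with a query along these lines; column names other than `merge_type` and `merge_algorithm` are assumed from the full column list of this table:

```sql
-- A sketch for watching merges in flight; `elapsed` and `progress` are assumed columns.
SELECT database, table, round(elapsed, 1) AS elapsed_s, round(progress, 2) AS progress,
       merge_type, merge_algorithm
FROM system.merges;
```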
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merges) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merges) <!--hide-->

View File

@ -48,4 +48,4 @@ CurrentMetric_DistributedFilesToInsert: 0
- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metric_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metric_log) <!--hide-->

View File

@ -38,4 +38,4 @@ SELECT * FROM system.metrics LIMIT 10
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metrics) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metrics) <!--hide-->

View File

@ -45,4 +45,4 @@ If there were problems with mutating some data parts, the following columns cont
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
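In practice, the failure-related columns mentioned above are usually checked with a query like this sketch; `mutation_id`, `is_done`, `latest_fail_time`, and `latest_fail_reason` are taken from the full column list of this table:

```sql
-- A sketch: list mutations that have not finished yet and show their last failure, if any.
SELECT database, table, mutation_id, latest_fail_time, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```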
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/mutations) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/mutations) <!--hide-->

View File

@ -29,4 +29,4 @@ Reads from this table are not parallelized.
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) <!--hide-->

View File

@ -27,4 +27,4 @@ Used for tests.
10 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers_mt) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) <!--hide-->

View File

@ -20,4 +20,4 @@ This is similar to the `DUAL` table found in other DBMSs.
1 rows in set. Elapsed: 0.001 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/one) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) <!--hide-->

View File

@ -66,4 +66,4 @@ error: 0
exception:
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/part_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) <!--hide-->

View File

@ -155,4 +155,4 @@ move_ttl_info.max: []
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/parts) <!--hide-->

View File

@ -14,7 +14,6 @@ Columns:
- `query` (String) The query text. For `INSERT`, it does not include the data to insert.
- `query_id` (String) Query ID, if defined.
```sql
:) SELECT * FROM system.processes LIMIT 10 FORMAT Vertical;
```
@ -34,14 +33,14 @@ initial_port: 47588
interface: 1
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
quota_key:
elapsed: 0.000582537
is_cancelled: 0
read_rows: 0
@ -53,12 +52,10 @@ memory_usage: 0
peak_memory_usage: 0
query: SELECT * from system.processes LIMIT 10 FORMAT Vertical;
thread_ids: [67]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ContextLock','RWLockAcquiredReadLocks']
ProfileEvents.Values: [1,1,36,1,10,1,89,16,1]
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','in_order','1','10000000000']
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
1 rows in set. Elapsed: 0.002 sec.
```
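With `ProfileEvents` and `Settings` stored as Map columns (as in the output above), individual counters and setting values can be read by key. A minimal sketch, using only keys that appear in the sample output:

```sql
-- Keys are taken from the sample above; any counter from system.events can be used.
SELECT
    query_id,
    elapsed,
    ProfileEvents['ReadCompressedBytes'] AS read_compressed_bytes,
    Settings['load_balancing']           AS load_balancing
FROM system.processes;
```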
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/processes) <!--hide-->

View File

@ -84,12 +84,10 @@ Columns:
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to an arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions` that were used during query execution.
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions combinators` that were used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `database engines` that were used during query execution.
@ -109,72 +107,53 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDa
``` text
Row 1:
──────
type: QueryFinish
event_date: 2021-03-18
event_time: 2021-03-18 20:54:18
event_time_microseconds: 2021-03-18 20:54:18.676686
query_start_time: 2021-03-18 20:54:18
query_start_time_microseconds: 2021-03-18 20:54:18.673934
query_duration_ms: 2
read_rows: 100
read_bytes: 800
written_rows: 0
written_bytes: 0
result_rows: 2
result_bytes: 4858
memory_usage: 0
current_database: default
query: SELECT uniqArray([1, 1, 2]), SUBSTRING('Hello, world', 7, 5), flatten([[[BIT_AND(123)]], [[mod(3, 2)], [CAST('1' AS INTEGER)]]]), week(toDate('2000-12-05')), CAST(arrayJoin([NULL, NULL]) AS Nullable(TEXT)), avgOrDefaultIf(number, number % 2), sumOrNull(number), toTypeName(sumOrNull(number)), countIf(toDate('2000-12-05') + number as d, toDayOfYear(d) % 2) FROM numbers(100)
normalized_query_hash: 17858008518552525706
query_kind: Select
databases: ['_table_function']
tables: ['_table_function.numbers']
columns: ['_table_function.numbers.number']
exception_code: 0
type: QueryStart
event_date: 2020-09-11
event_time: 2020-09-11 10:08:17
event_time_microseconds: 2020-09-11 10:08:17.063321
query_start_time: 2020-09-11 10:08:17
query_start_time_microseconds: 2020-09-11 10:08:17.063321
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 0
result_bytes: 0
memory_usage: 0
current_database: default
query: INSERT INTO test1 VALUES
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
address: ::ffff:127.0.0.1
port: 37486
initial_user: default
initial_query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
initial_address: ::ffff:127.0.0.1
initial_port: 37486
interface: 1
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse
client_revision: 54447
client_version_major: 21
client_version_minor: 4
client_version_patch: 1
http_method: 0
is_initial_query: 1
user: default
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
address: ::ffff:127.0.0.1
port: 33452
initial_user: default
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
initial_address: ::ffff:127.0.0.1
initial_port: 33452
interface: 1
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54449
log_comment:
thread_ids: [587,11939]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','TableFunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes']
ProfileEvents.Values: [1,1,36,1,10,2,1048680,1,4096,36,1,110,100,800,77,1,3137,1476,1101,8,2577,8192]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
used_aggregate_functions: ['groupBitAnd','avg','sum','count','uniq']
used_aggregate_function_combinators: ['OrDefault','If','OrNull','Array']
used_database_engines: []
used_data_type_families: ['String','Array','Int32','Nullable']
used_dictionaries: []
used_formats: []
used_functions: ['toWeek','CAST','arrayFlatten','toTypeName','toDayOfYear','addDays','array','toDate','modulo','substring','plus']
used_storages: []
used_table_functions: ['numbers']
revision: 54440
thread_ids: []
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```
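Because `ProfileEvents` and `Settings` are Map columns, a single counter or setting can be pulled out by key without array juggling. A minimal sketch, assuming the keys shown in the example above:

```sql
-- Recent finished queries with one profile counter and one setting extracted by key.
SELECT
    event_time,
    query_duration_ms,
    ProfileEvents['SelectQuery'] AS select_queries,
    Settings['load_balancing']   AS load_balancing
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 5;
```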
**See Also**
- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_log) <!--hide-->

View File

@ -58,8 +58,7 @@ Columns:
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).
**Example**
@ -98,21 +97,20 @@ initial_port: 33452
interface: 1
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
quota_key:
revision: 54440
ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars']
ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520]
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
```
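The per-thread `ProfileEvents` map can be queried the same way. A small sketch that ranks the threads of today's queries by one of the counters visible in the sample output:

```sql
-- A sketch; 'ReadCompressedBytes' is one of the keys shown above, any other counter works too.
SELECT
    query_id,
    thread_id,
    ProfileEvents['ReadCompressedBytes'] AS read_compressed_bytes
FROM system.query_thread_log
WHERE event_date = today()
ORDER BY read_compressed_bytes DESC
LIMIT 10;
```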
**See Also**
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_thread_log) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_thread_log) <!--hide-->

View File

@ -17,3 +17,5 @@ Columns:
- `max_read_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of rows read from all tables and table functions that participated in queries.
- `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of bytes read from all tables and table functions that participated in queries.
- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum query execution time, in seconds.
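A quick way to see these limits per quota is a plain select; `quota_name` and `duration` are assumed from the full column list of this table:

```sql
-- A sketch: show the read and execution-time limits configured for each quota interval.
SELECT quota_name, duration, max_read_rows, max_read_bytes, max_execution_time
FROM system.quota_limits;
```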
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_limits) <!--hide-->

View File

@ -28,3 +28,5 @@ Columns:
## See Also {#see-also}
- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_usage) <!--hide-->

View File

@ -24,5 +24,5 @@ Columns:
- [SHOW QUOTAS](../../sql-reference/statements/show.md#show-quotas-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas) <!--hide-->

View File

@ -30,4 +30,6 @@ Columns:
## See Also {#see-also}
- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas_usage) <!--hide-->

View File

@ -120,5 +120,5 @@ WHERE
If this query does not return anything, it means that everything is fine.
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/replicas) <!--hide-->

View File

@ -18,4 +18,4 @@ Columns:
- 1 — The role has `ADMIN OPTION` privilege.
- 0 — The role does not have the `ADMIN OPTION` privilege.
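For reference, a row with the flag set to 1 results from a grant of the following form; the role and user names are hypothetical:

```sql
-- Hypothetical names; WITH ADMIN OPTION is what sets the flag to 1.
GRANT accountant_role TO user1 WITH ADMIN OPTION;
```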
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) <!--hide-->

View File

@ -12,4 +12,4 @@ Columns:
- [SHOW ROLES](../../sql-reference/statements/show.md#show-roles-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/roles) <!--hide-->

View File

@ -31,4 +31,4 @@ Columns:
- [SHOW POLICIES](../../sql-reference/statements/show.md#show-policies-statement)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/row_policies) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/row_policies) <!--hide-->

View File

@ -50,4 +50,4 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing'
- [Constraints on Settings](../../operations/settings/constraints-on-settings.md)
- [SHOW SETTINGS](../../sql-reference/statements/show.md#show-settings) statement
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings) <!--hide-->

Some files were not shown because too many files have changed in this diff.