Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-dynamic-incomplete-type-insert

Commit aab5f9465c by avogar, 2024-09-12 17:01:29 +00:00
615 changed files with 13611 additions and 3678 deletions

CITATION.cff (new file)

@ -0,0 +1,31 @@
# This CITATION.cff file was generated with cffinit.
cff-version: 1.2.0
title: "ClickHouse"
message: "If you use this software, please cite it as below."
type: software
authors:
- family-names: "Milovidov"
given-names: "Alexey"
repository-code: 'https://github.com/ClickHouse/ClickHouse'
url: 'https://clickhouse.com'
license: Apache-2.0
preferred-citation:
type: article
authors:
- family-names: "Schulze"
given-names: "Robert"
- family-names: "Schreiber"
given-names: "Tom"
- family-names: "Yatsishin"
given-names: "Ilya"
- family-names: "Dahimene"
given-names: "Ryadh"
- family-names: "Milovidov"
given-names: "Alexey"
journal: "Proceedings of the VLDB Endowment"
title: "ClickHouse - Lightning Fast Analytics for Everyone"
year: 2024
volume: 17
issue: 12
doi: 10.14778/3685800.3685802


@ -42,28 +42,33 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
The following upcoming meetups feature the creator of ClickHouse and CTO, Alexey Milovidov:
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
Recently completed events
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"


@ -66,13 +66,11 @@ TRAP(gethostbyname)
TRAP(gethostbyname2)
TRAP(gethostent)
TRAP(getlogin)
TRAP(getmntent)
TRAP(getnetbyaddr)
TRAP(getnetbyname)
TRAP(getnetent)
TRAP(getnetgrent)
TRAP(getnetgrent_r)
TRAP(getopt)
TRAP(getopt_long)
TRAP(getopt_long_only)
TRAP(getpass)
@ -133,7 +131,6 @@ TRAP(nrand48)
TRAP(__ppc_get_timebase_freq)
TRAP(ptsname)
TRAP(putchar_unlocked)
TRAP(putenv)
TRAP(pututline)
TRAP(pututxline)
TRAP(putwchar_unlocked)
@ -148,7 +145,6 @@ TRAP(sethostent)
TRAP(sethostid)
TRAP(setkey)
//TRAP(setlocale) // Used by replxx at startup
TRAP(setlogmask)
TRAP(setnetent)
TRAP(setnetgrent)
TRAP(setprotoent)
@ -203,7 +199,6 @@ TRAP(lgammal)
TRAP(nftw)
TRAP(nl_langinfo)
TRAP(putc_unlocked)
TRAP(rand)
/** In the current POSIX.1 specification (POSIX.1-2008), readdir() is not required to be thread-safe. However, in modern
* implementations (including the glibc implementation), concurrent calls to readdir() that specify different directory streams
* are thread-safe. In cases where multiple threads must read from the same directory stream, using readdir() with external
@ -288,4 +283,14 @@ TRAP(tss_get)
TRAP(tss_set)
TRAP(tss_delete)
#ifndef USE_MUSL
/// These produce duplicate symbol errors when statically linking with musl.
/// Maybe we can remove them from the musl fork.
TRAP(getopt)
TRAP(putenv)
TRAP(setlogmask)
TRAP(rand)
TRAP(getmntent)
#endif
#endif
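For context, each TRAP(f) entry poisons the libc function f so that accidental use fails at runtime; the macro itself is defined elsewhere in this file and is not part of this diff. A minimal sketch of the idea (the __builtin_trap() body is an assumption, not the actual implementation):

```cpp
// Hypothetical sketch of a TRAP-style macro: each trapped function becomes a
// stub that crashes the process, so any accidental use of a banned or
// non-thread-safe libc function fails loudly at runtime.
#define TRAP(func) \
    extern "C" void func() { __builtin_trap(); }
```

The new #ifndef USE_MUSL guard above follows from the comment in the diff: musl's static libc already provides these five symbols, so redefining them would cause duplicate-symbol link errors.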


@ -48,25 +48,17 @@ std::string PathImpl::currentImpl()
std::string PathImpl::homeImpl()
{
std::string path;
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE)
size_t buf_size = 1024; // Same as glibc use for getpwuid
std::vector<char> buf(buf_size);
struct passwd res;
struct passwd* pwd = nullptr;
getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
#else
struct passwd* pwd = getpwuid(getuid());
#endif
if (pwd)
path = pwd->pw_dir;
else
{
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE)
getpwuid_r(geteuid(), &res, buf.data(), buf_size, &pwd);
#else
pwd = getpwuid(geteuid());
#endif
if (pwd)
path = pwd->pw_dir;
else
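As a side note on the pattern above: getpwuid_r() reports ERANGE when the scratch buffer is too small. The patch uses a fixed 1024-byte buffer (the same default glibc uses); a more defensive variant, shown here purely as an assumption and not as part of the patch, would retry with a growing buffer:

```cpp
// Not part of the patch: a sketch of a more defensive lookup. getpwuid_r()
// returns ERANGE when the buffer is too small, so a robust caller grows the
// buffer and retries instead of assuming 1024 bytes is always enough.
#include <pwd.h>
#include <unistd.h>
#include <cerrno>
#include <vector>

static passwd* getPasswdEntry(uid_t uid, passwd& res, std::vector<char>& buf)
{
    passwd* pwd = nullptr;
    while (getpwuid_r(uid, &res, buf.data(), buf.size(), &pwd) == ERANGE)
        buf.resize(buf.size() * 2); // retry with a bigger buffer
    return pwd; // nullptr if no matching entry was found
}
```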


@ -19,6 +19,8 @@
#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@ -33,6 +35,27 @@ namespace Net
{
class IHTTPSessionDataHooks
/// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession.
/// It allows monitoring, throttling and scheduling of data streams with syscall granularity.
{
public:
virtual ~IHTTPSessionDataHooks() = default;
virtual void atStart(int bytes) = 0;
/// Called before sending/receiving `bytes` bytes to/from the socket.
virtual void atFinish(int bytes) = 0;
/// Called when sending/receiving of `bytes` bytes has finished successfully.
virtual void atFail() = 0;
/// Called instead of atFinish() if an error occurs during send/receive.
};
using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;
class Net_API HTTPSession
/// HTTPSession implements basic HTTP session management
/// for both HTTP clients and HTTP servers.
@ -73,6 +96,12 @@ namespace Net
Poco::Timespan getReceiveTimeout() const;
/// Returns receive timeout for the HTTP session.
void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
/// Sets data hooks that will be called on every send to the socket.
void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
/// Sets data hooks that will be called on every receive from the socket.
bool connected() const;
/// Returns true if the underlying socket is connected.
@ -211,6 +240,10 @@ namespace Net
Poco::Exception * _pException;
Poco::Any _data;
// Data hooks
HTTPSessionDataHooksPtr _sendDataHooks;
HTTPSessionDataHooksPtr _receiveDataHooks;
friend class HTTPStreamBuf;
friend class HTTPHeaderStreamBuf;
friend class HTTPFixedLengthStreamBuf;
@ -246,6 +279,16 @@ namespace Net
return _receiveTimeout;
}
inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
{
_sendDataHooks = sendDataHooks;
}
inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
{
_receiveDataHooks = receiveDataHooks;
}
inline StreamSocket & HTTPSession::socket()
{
return _socket;
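To make the new hook interface concrete, here is an illustrative implementation that merely counts traffic; the class name and members are invented for this sketch and do not appear in the patch:

```cpp
// Illustrative only (not part of the patch): a data hook that counts bytes
// and failures for one direction of an HTTP session.
#include <atomic>
#include <memory>
#include "Poco/Net/HTTPSession.h"

class CountingDataHooks : public Poco::Net::IHTTPSessionDataHooks
{
public:
    void atStart(int /*bytes*/) override {}
    void atFinish(int bytes) override { total_bytes += bytes; }
    void atFail() override { ++failures; }

    std::atomic<long> total_bytes{0};
    std::atomic<long> failures{0};
};

// Usage (hypothetical): session.setSendDataHooks(std::make_shared<CountingDataHooks>());
```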


@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
try
{
return _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atStart((int) length);
int result = _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_sendDataHooks)
_sendDataHooks->atFail();
setException(exc);
throw;
}
@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
try
{
return _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atStart(length);
int result = _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_receiveDataHooks)
_receiveDataHooks->atFail();
setException(exc);
throw;
}


@ -18,4 +18,4 @@ target_compile_options (_poco_util
-Wno-zero-as-null-pointer-constant
)
target_include_directories (_poco_util SYSTEM PUBLIC "include")
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML)
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)


@ -241,6 +241,20 @@ namespace Util
/// If the value contains references to other properties (${<property>}), these
/// are expanded.
std::string getHost(const std::string & key) const;
/// Returns the string value of the host property with the given name.
/// Throws a NotFoundException if the key does not exist.
/// Throws a SyntaxException if the property is not a valid host (IP address or domain).
/// If the value contains references to other properties (${<property>}), these
/// are expanded.
std::string getHost(const std::string & key, const std::string & defaultValue) const;
/// If a property with the given key exists, returns the host property's string value,
/// otherwise returns the given default value.
/// Throws a SyntaxException if the property is not a valid host (IP address or domain).
/// If the value contains references to other properties (${<property>}), these
/// are expanded.
virtual void setString(const std::string & key, const std::string & value);
/// Sets the property with the given key to the given value.
/// An already existing value for the key is overwritten.
@ -339,12 +353,35 @@ namespace Util
static bool parseBool(const std::string & value);
void setRawWithEvent(const std::string & key, std::string value);
static void checkHostValidity(const std::string & value);
/// Throws a SyntaxException if the value is not a valid host (IP address or domain).
virtual ~AbstractConfiguration();
private:
std::string internalExpand(const std::string & value) const;
std::string uncheckedExpand(const std::string & value) const;
static bool isValidIPv4Address(const std::string & value);
/// An IPv4 address is considered valid if it is "0.0.0.0" or an address
/// accepted by inet_aton() or inet_addr().
static bool isValidIPv6Address(const std::string & value);
/// An IPv6 address is considered valid if it is "::" or an address accepted by
/// inet_pton() with the AF_INET6 flag (in this case it may carry a scope id
/// and may be surrounded by '[' and ']').
static bool isValidDomainName(const std::string & value);
/// <domain> ::= <subdomain> [ "." ]
/// <subdomain> ::= <label> | <subdomain> "." <label>
/// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
/// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
/// <let-dig-hyp> ::= <let-dig> | "-"
/// <let-dig> ::= <letter> | <digit>
/// <letter> ::= any one of the 52 alphabetic characters A through Z in
/// upper case and a through z in lower case
/// <digit> ::= any one of the ten digits 0 through 9
AbstractConfiguration(const AbstractConfiguration &);
AbstractConfiguration & operator=(const AbstractConfiguration &);


@ -18,6 +18,7 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include "Poco/String.h"
#include "Poco/Net/IPAddressImpl.h"
using Poco::Mutex;
@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) c
}
std::string AbstractConfiguration::getHost(const std::string& key) const
{
Mutex::ScopedLock lock(_mutex);
std::string value;
if (getRaw(key, value))
{
std::string expandedValue = internalExpand(value);
checkHostValidity(expandedValue);
return expandedValue;
}
else
throw NotFoundException(key);
}
std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
{
Mutex::ScopedLock lock(_mutex);
std::string value;
if (getRaw(key, value))
{
std::string expandedValue = internalExpand(value);
checkHostValidity(expandedValue);
return expandedValue;
}
else
{
checkHostValidity(defaultValue);
return defaultValue;
}
}
void AbstractConfiguration::setString(const std::string& key, const std::string& value)
{
setRawWithEvent(key, value);
@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string
}
void AbstractConfiguration::checkHostValidity(const std::string& value)
{
if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
{
throw SyntaxException("Property is not a valid host name", value);
}
}
bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
{
using Poco::Net::Impl::IPv4AddressImpl;
IPv4AddressImpl empty4 = IPv4AddressImpl();
IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
// parse() yields an all-zero address on failure, which is indistinguishable
// from a legitimate "0.0.0.0", hence the explicit comparison with the literal.
return ipAddress != empty4 || value == "0.0.0.0";
}
bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
{
#if defined(POCO_HAVE_IPv6)
using Poco::Net::Impl::IPv6AddressImpl;
IPv6AddressImpl empty6 = IPv6AddressImpl();
IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
return ipAddress != empty6 || value == "::";
#else
return false;
#endif
}
bool AbstractConfiguration::isValidDomainName(const std::string& value)
{
if (value.empty() || value == "." || value.length() > 253)
return false;
int labelLength = 0;
char oldChar = 0;
for (char ch : value)
{
if (ch == '.')
{
if (labelLength == 0 || labelLength > 63 || oldChar == '-')
return false;
labelLength = 0;
}
else if (isalnum(static_cast<unsigned char>(ch)) || ch == '-')
{
if (labelLength == 0 && (ch == '-' || isdigit(static_cast<unsigned char>(ch))))
return false;
++labelLength;
}
else
{
return false;
}
oldChar = ch;
}
return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
}
} } // namespace Poco::Util
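A short usage sketch for the new host-validating accessors; the configuration keys and the fallback value are made up for illustration:

```cpp
// Illustrative usage of the new accessors; the keys ("listen_host",
// "backup_host") and the "localhost" default are assumptions, not patch code.
#include <iostream>
#include "Poco/Util/AbstractConfiguration.h"

void readListenHost(const Poco::Util::AbstractConfiguration & config)
{
    // Throws NotFoundException if the key is absent, SyntaxException if the
    // value is not a valid IPv4/IPv6 address or domain name.
    std::string host = config.getHost("listen_host");

    // Falls back to "localhost"; note the default itself is validated too.
    std::string backup = config.getHost("backup_host", "localhost");

    std::cout << host << " " << backup << "\n";
}
```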

contrib/curl vendored

@ -1 +1 @@
Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6

contrib/grpc vendored

@ -1 +1 @@
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e


@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/appendable.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/mlbe.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@ -178,166 +80,180 @@ set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/serv.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servlk.cpp"
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/ulocale.cpp"
"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp")
set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
"${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
"${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/gender.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
"${ICU_SOURCE_DIR}/i18n/number_output.cpp"
"${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
"${ICU_SOURCE_DIR}/i18n/units_router.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)
@ -464,6 +481,11 @@ if (ARCH_S390X)
else()
set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
endif()
# ^^ You might be confused how the same assembly files can be used for different little-endian platforms (x86, ARM).
# These files are indeed assembly, but they contain only data ('.long' directives), which makes them portable across CPUs.
# Only the endianness and the character set (ASCII, EBCDIC) make a difference; see also
# https://unicode-org.github.io/icu/userguide/icu_data/#sharing-icu-data-between-platforms, 'Sharing ICU Data Between Platforms'.
# (As an experiment, try re-generating the data files on x86 vs. ARM: you'll get exactly the same files.)
set(ICUDATA_SOURCES
"${ICUDATA_SOURCE_FILE}"

contrib/libarchive vendored

@ -1 +1 @@
Subproject commit ee45796171324519f0c0bfd012018dd099296336
Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543


@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib)
endif()
if (TARGET ch_contrib::zstd)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()


@ -334,13 +334,16 @@ typedef uint64_t uintmax_t;
/* #undef ARCHIVE_XATTR_LINUX */
/* Version number of bsdcpio */
#define BSDCPIO_VERSION_STRING "3.7.0"
#define BSDCPIO_VERSION_STRING "3.7.4"
/* Version number of bsdtar */
#define BSDTAR_VERSION_STRING "3.7.0"
#define BSDTAR_VERSION_STRING "3.7.4"
/* Version number of bsdcat */
#define BSDCAT_VERSION_STRING "3.7.0"
#define BSDCAT_VERSION_STRING "3.7.4"
/* Version number of bsdunzip */
#define BSDUNZIP_VERSION_STRING "3.7.4"
/* Define to 1 if you have the `acl_create_entry' function. */
/* #undef HAVE_ACL_CREATE_ENTRY */
@ -642,8 +645,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `getgrnam_r' function. */
#define HAVE_GETGRNAM_R 1
/* Define to 1 if platform uses `optreset` to reset `getopt` */
#define HAVE_GETOPT_OPTRESET 1
/* Define to 1 if you have the `getline' function. */
#define HAVE_GETLINE 1
/* Define to 1 if you have the `getpid' function. */
#define HAVE_GETPID 1
@ -750,6 +753,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `pcreposix' library (-lpcreposix). */
/* #undef HAVE_LIBPCREPOSIX */
/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */
/* #undef HAVE_LIBPCRE2 */
/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */
/* #undef HAVE_LIBPCRE2POSIX */
/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1
@ -765,9 +774,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */
/* Define to 1 if you have the `zstd' library (-lzstd) with compression
support. */
/* #undef HAVE_LIBZSTD_COMPRESSOR */
/* Define to 1 if you have the ZSTD_compressStream function. */
/* #undef HAVE_ZSTD_compressStream */
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
@ -923,6 +931,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the <pcreposix.h> header file. */
/* #undef HAVE_PCREPOSIX_H */
/* Define to 1 if you have the <pcre2posix.h> header file. */
/* #undef HAVE_PCRE2POSIX_H */
/* Define to 1 if you have the `pipe' function. */
#define HAVE_PIPE 1
@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `strrchr' function. */
#define HAVE_STRRCHR 1
/* Define to 1 if the system has the type `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS */
/* Define to 1 if `f_iosize' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */
/* Define to 1 if `f_namemax' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */
@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1
/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1
/* Define to 1 if you have the <sys/acl.h> header file. */
/* #undef HAVE_SYS_ACL_H */
@ -1276,10 +1296,10 @@ typedef uint64_t uintmax_t;
#define ICONV_CONST
/* Version number of libarchive as a single integer */
#define LIBARCHIVE_VERSION_NUMBER "3007000"
#define LIBARCHIVE_VERSION_NUMBER "3007004"
/* Version number of libarchive */
#define LIBARCHIVE_VERSION_STRING "3.7.0"
#define LIBARCHIVE_VERSION_STRING "3.7.4"
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t;
#endif /* SAFE_TO_DEFINE_EXTENSIONS */
/* Version number of package */
#define VERSION "3.7.0"
#define VERSION "3.7.4"
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

contrib/libfiu vendored

@ -1 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3

contrib/libuv vendored

@ -1 +1 @@
Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e


@ -10,6 +10,7 @@ set(uv_sources
src/random.c
src/strscpy.c
src/strtok.c
src/thread-common.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries rt)
list(APPEND uv_sources
src/unix/epoll.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/linux.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c)


@ -140,6 +140,12 @@ if (CMAKE_CROSSCOMPILING)
message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()
# llvm-project/llvm/cmake/config-ix.cmake does a weird thing: it defines _LARGEFILE64_SOURCE,
# then checks whether the lseek64() function exists, then undefines _LARGEFILE64_SOURCE.
# The actual code that uses this function *doesn't* define _LARGEFILE64_SOURCE, so lseek64()
# may not be declared and compilation fails. This happens with musl.
add_compile_definitions("_LARGEFILE64_SOURCE")
add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")
set_directory_properties (PROPERTIES
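An illustrative reconstruction of the failure mode described in the comment above (the snippet is an assumption, not LLVM code): the declaration of lseek64() is gated on _LARGEFILE64_SOURCE, so a configure-time probe that defines the macro can succeed while the real compilation, which does not define it, fails:

```cpp
// Illustrative only: this compiles because _LARGEFILE64_SOURCE is defined
// before <unistd.h>; drop the define and lseek64() is undeclared on musl,
// which is exactly the mismatch between LLVM's config check and its code.
#define _LARGEFILE64_SOURCE
#include <unistd.h>

long long seekToEnd(int fd)
{
    return lseek64(fd, 0, SEEK_END);
}
```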

contrib/openssl vendored

@ -1 +1 @@
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed

contrib/sysroot vendored

@ -1 +1 @@
Subproject commit cc385041b226d1fc28ead14dbab5d40a5f821dd8
Subproject commit 5be834147d5b5dd77ca2b821f356982029320513


@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""


@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""


@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off

View File

@ -124,6 +124,8 @@ function setup_logs_replication
check_logs_credentials || return 0
__set_connection_args
echo "My hostname is ${HOSTNAME}"
echo 'Create all configured system logs'
clickhouse-client --query "SYSTEM FLUSH LOGS"
@ -184,7 +186,17 @@ function setup_logs_replication
/^TTL /d
')
echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2
echo -e "Creating remote destination table ${table}_${hash} with statement:" >&2
echo "::group::${table}"
# this is the only way a big "$statement" can be printed without causing an EAGAIN error:
# cat: write error: Resource temporarily unavailable
statement_print="${statement}"
if [ "${#statement_print}" -gt 4000 ]; then
statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}"
fi
echo -e "$statement_print"
echo "::endgroup::"
echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
--distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \

View File

@ -3,6 +3,8 @@
FROM alpine:3.18
RUN apk add --no-cache -U iproute2 \
&& for bin in iptables iptables-restore iptables-save; \
&& for bin in \
iptables iptables-restore iptables-save \
ip6tables ip6tables-restore ip6tables-save; \
do ln -sf xtables-nft-multi "/sbin/$bin"; \
done

View File

@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry

View File

@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)
#### Improvement
* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to the LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL CATEGORY
* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to the LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to the LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,50 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.3.59-lts (e729b9fa40e) FIXME as compared to v24.8.2.3-lts (b54f79ed323)
#### New Feature
* Backported in [#68710](https://github.com/ClickHouse/ClickHouse/issues/68710): Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE` which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).
#### Improvement
* Backported in [#69097](https://github.com/ClickHouse/ClickHouse/issues/69097): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68973](https://github.com/ClickHouse/ClickHouse/issues/68973): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68818](https://github.com/ClickHouse/ClickHouse/issues/68818): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68893](https://github.com/ClickHouse/ClickHouse/issues/68893): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68721](https://github.com/ClickHouse/ClickHouse/issues/68721): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69029](https://github.com/ClickHouse/ClickHouse/issues/69029): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68864](https://github.com/ClickHouse/ClickHouse/issues/68864): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68854](https://github.com/ClickHouse/ClickHouse/issues/68854): Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68790](https://github.com/ClickHouse/ClickHouse/issues/68790): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69108](https://github.com/ClickHouse/ClickHouse/issues/69108): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68850](https://github.com/ClickHouse/ClickHouse/issues/68850): Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68911](https://github.com/ClickHouse/ClickHouse/issues/68911): Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#69160](https://github.com/ClickHouse/ClickHouse/issues/69160): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69072](https://github.com/ClickHouse/ClickHouse/issues/69072): Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#69016](https://github.com/ClickHouse/ClickHouse/issues/69016): Don't use the serializations cache in const Dynamic column methods. It could lead to a use-of-uninitialized value or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69120](https://github.com/ClickHouse/ClickHouse/issues/69120): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68947](https://github.com/ClickHouse/ClickHouse/issues/68947):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68704](https://github.com/ClickHouse/ClickHouse/issues/68704): Fix enumerating dynamic subcolumns. [#68582](https://github.com/ClickHouse/ClickHouse/pull/68582) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69000](https://github.com/ClickHouse/ClickHouse/issues/69000): Prioritizing of virtual columns in hive partitioning. [#68606](https://github.com/ClickHouse/ClickHouse/pull/68606) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#68799](https://github.com/ClickHouse/ClickHouse/issues/68799): CI: Disable SQLLogic job. [#68654](https://github.com/ClickHouse/ClickHouse/pull/68654) ([Max K.](https://github.com/maxknv)).
* Backported in [#68834](https://github.com/ClickHouse/ClickHouse/issues/68834): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68781](https://github.com/ClickHouse/ClickHouse/issues/68781): Fix flaky test 00989_parallel_parts_loading. [#68737](https://github.com/ClickHouse/ClickHouse/pull/68737) ([alesapin](https://github.com/alesapin)).
* Backported in [#68762](https://github.com/ClickHouse/ClickHouse/issues/68762): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#68810](https://github.com/ClickHouse/ClickHouse/issues/68810): Try to disable rerun check if job triggered manually. [#68751](https://github.com/ClickHouse/ClickHouse/pull/68751) ([Max K.](https://github.com/maxknv)).
* Backported in [#68962](https://github.com/ClickHouse/ClickHouse/issues/68962): Fix 2477 timeout. [#68752](https://github.com/ClickHouse/ClickHouse/pull/68752) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#68977](https://github.com/ClickHouse/ClickHouse/issues/68977): Check setting use_json_alias_for_old_object_type in runtime. [#68793](https://github.com/ClickHouse/ClickHouse/pull/68793) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68852](https://github.com/ClickHouse/ClickHouse/issues/68852): Make dynamic structure selection more consistent. [#68802](https://github.com/ClickHouse/ClickHouse/pull/68802) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69052](https://github.com/ClickHouse/ClickHouse/issues/69052): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e)
#### Improvement
* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete old code of named collections from dictionaries and substitute it to the new, which allows to use DDL created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to the LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,72 @@
---
slug: /en/engines/table-engines/integrations/azure-queue
sidebar_position: 181
sidebar_label: AzureQueue
---
# AzureQueue Table Engine
This engine provides integration with the [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem and allows streaming data import.
## Create Table {#creating-a-table}
``` sql
CREATE TABLE test (name String, value UInt32)
ENGINE = AzureQueue(...)
[SETTINGS]
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
...
```
**Engine parameters**
`AzureQueue` supports the same parameters as the `AzureBlobStorage` table engine. See the parameters section [here](../../../engines/table-engines/integrations/azureBlobStorage.md).
**Example**
```sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/data/')
SETTINGS
mode = 'unordered'
```
## Settings {#settings}
The set of supported settings is the same as for the `S3Queue` table engine, but without the `s3queue_` prefix. See the [full list of settings](../../../engines/table-engines/integrations/s3queue.md#settings).
## Description {#description}
`SELECT` is not particularly useful for streaming import (except for debugging), because each file can be imported only once. It is more practical to create real-time threads using [materialized views](../../../sql-reference/statements/create/view.md). To do this:
1. Use the engine to create a table for consuming from the specified path in Azure Blob Storage and consider it a data stream.
2. Create a table with the desired structure.
3. Create a materialized view that converts data from the engine and puts it into a previously created table.
When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background.
Example:
``` sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('<endpoint>', 'CSV', 'gzip')
SETTINGS
mode = 'unordered';
CREATE TABLE stats (name String, value UInt32)
ENGINE = MergeTree() ORDER BY name;
CREATE MATERIALIZED VIEW consumer TO stats
AS SELECT name, value FROM azure_queue_engine_table;
SELECT * FROM stats ORDER BY name;
```
## Virtual columns {#virtual-columns}
- `_path` — Path to the file.
- `_file` — Name of the file.
For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
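For instance, while debugging you might inspect them directly (a sketch against the example table above; note that a direct `SELECT` consumes the files it reads):

```sql
-- Which files did the rows come from?
SELECT _path, _file, name, value
FROM azure_queue_engine_table
LIMIT 10;
```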

View File

@ -35,7 +35,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
[SETTINGS ...]
```
### Engine parameters
### Engine parameters {#parameters}
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.

View File

@ -5,6 +5,7 @@ sidebar_label: S3Queue
---
# S3Queue Table Engine
This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem and allows streaming import. This engine is similar to the [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) engines, but provides S3-specific features.
## Create Table {#creating-a-table}
@ -16,27 +17,25 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
[s3queue_loading_retries = 0,]
[s3queue_processing_threads_num = 1,]
[s3queue_enable_logging_to_s3queue_log = 0,]
[s3queue_polling_min_timeout_ms = 1000,]
[s3queue_polling_max_timeout_ms = 10000,]
[s3queue_polling_backoff_ms = 0,]
[s3queue_tracked_file_ttl_sec = 0,]
[s3queue_tracked_files_limit = 1000,]
[s3queue_cleanup_interval_min_ms = 10000,]
[s3queue_cleanup_interval_max_ms = 30000,]
[loading_retries = 0,]
[processing_threads_num = 1,]
[enable_logging_to_s3queue_log = 0,]
[polling_min_timeout_ms = 1000,]
[polling_max_timeout_ms = 10000,]
[polling_backoff_ms = 0,]
[tracked_file_ttl_sec = 0,]
[tracked_files_limit = 1000,]
[cleanup_interval_min_ms = 10000,]
[cleanup_interval_max_ms = 30000,]
```
Starting with `24.7`, settings without the `s3queue_` prefix are also supported.
:::warning
Before `24.7`, the `s3queue_` prefix is required for all settings apart from `mode`, `after_processing` and `keeper_path`.
:::
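For example, on `24.7` or newer the following two definitions are equivalent (the bucket URL is illustrative):

```sql
-- Unprefixed settings (24.7+)
CREATE TABLE s3queue_new (name String, value UInt32)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*', 'CSV')
SETTINGS mode = 'unordered', processing_threads_num = 4;

-- The same with the legacy prefix (required before 24.7)
CREATE TABLE s3queue_old (name String, value UInt32)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*', 'CSV')
SETTINGS mode = 'unordered', s3queue_processing_threads_num = 4;
```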
**Engine parameters**
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.
`S3Queue` supports the same parameters as the `S3` table engine. See the parameters section [here](../../../engines/table-engines/integrations/s3.md#parameters).
**Example**

View File

@ -111,15 +111,16 @@ ANN indexes are built during column insertion and merge. As a result, `INSERT` a
tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write requests.
ANN indexes support these queries:
ANN indexes support this type of query:
``` sql
SELECT *
FROM table
[WHERE ...]
ORDER BY Distance(vectors, Point)
LIMIT N
```
``` sql
WITH [...] AS reference_vector
SELECT *
FROM table
WHERE ... -- WHERE clause is optional
ORDER BY Distance(vectors, reference_vector)
LIMIT N
```
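As a concrete sketch of the second form (the table, column and vector are illustrative; `L2Distance` is one possible distance function):

```sql
WITH [0.1, 0.2, 0.3] AS reference_vector
SELECT id
FROM tab
ORDER BY L2Distance(vectors, reference_vector)
LIMIT 10;
```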
:::tip
To avoid writing out large vectors, you can use [query

View File

@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a;
These lightweight statistics aggregate information about the distribution of values in columns. Statistics are stored in every part and updated on every insert.
They can be used for the prewhere optimization only if the setting `allow_statistics_optimize = 1` is enabled.
#### Available Types of Column Statistics {#available-types-of-column-statistics}
### Available Types of Column Statistics {#available-types-of-column-statistics}
- `MinMax`
The minimum and maximum column values, which allow estimating the selectivity of range filters on numeric columns.
Syntax: `minmax`
- `TDigest`
[TDigest](https://github.com/tdunning/t-digest) sketches which allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.
Syntax: `tdigest`
- `Uniq`
[HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimate of how many distinct values a column contains.
- `count_min`
Syntax: `uniq`
- `CountMin`
[CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
Syntax: `countmin`
### Supported Data Types {#supported-data-types}
| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|-----------|----------------------------------------------------|-----------------------|
| CountMin | ✔ | ✔ |
| MinMax | ✔ | ✗ |
| TDigest | ✔ | ✗ |
| Uniq | ✔ | ✔ |
### Supported Operations {#supported-operations}
| | Equality filters (==) | Range filters (>, >=, <, <=) |
|-----------|-----------------------|------------------------------|
| CountMin | ✔ | ✗ |
| MinMax | ✗ | ✔ |
| TDigest | ✗ | ✔ |
| Uniq | ✔ | ✗ |
[Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
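A minimal end-to-end sketch of the statistics described above (table and column names are illustrative; it assumes the experimental statistics feature is first enabled via `allow_experimental_statistics`):

```sql
SET allow_experimental_statistics = 1;

CREATE TABLE tab
(
    a Int64 STATISTICS(minmax, tdigest),
    b String STATISTICS(uniq, countmin)
)
ENGINE = MergeTree ORDER BY a;

-- Let the optimizer use the statistics when evaluating PREWHERE conditions
SET allow_statistics_optimize = 1;
SELECT count() FROM tab WHERE a > 100 AND b = 'x';
```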
## Column-level Settings {#column-level-settings}

View File

@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
## JSONCompactWithProgress {#jsoncompactwithprogress}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.
Each row is one of the following: a metadata, data, progress, statistics, exception, or totals object:
1. **Metadata Object (`meta`)**
- Describes the structure of the data rows.
- Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
- Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
- Appears before any data objects.
2. **Data Object (`data`)**
- Represents a row of query results.
- Fields: An array with values corresponding to the columns defined in the metadata.
- Example: `{"data":["1", "John Doe"]}`
- Appears after the metadata object, one per row.
3. **Progress Information Object (`progress`)**
- Provides real-time progress feedback during query execution.
- Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
- Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
- May appear intermittently.
4. **Statistics Object (`statistics`)**
- Summarizes query execution statistics.
- Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
- Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
- Appears at the end.
5. **Exception Object (`exception`)**
- Represents an error that occurred during query execution.
- Fields: A single text field containing the error message.
- Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
- Appears when an error is encountered.
6. **Totals Object (`totals`)**
- Provides the totals for each numeric column in the result set.
- Fields: An array with total values corresponding to the columns defined in the metadata.
- Example: `{"totals": ["", "3"]}`
- Appears at the end of the data rows, if applicable.
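A query in this format might look as follows (the `users` table is hypothetical); the kind of output it can produce is shown in the example below:

```sql
SELECT id, name
FROM users
FORMAT JSONCompactWithProgress;
```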
Example:
```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```
## JSONEachRow {#jsoneachrow}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

View File

@ -58,7 +58,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds": "0"}
1
```
@ -472,7 +472,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -668,7 +668,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@ -708,7 +708,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -766,7 +766,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -785,7 +785,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -233,6 +233,16 @@ Features:
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutation, merge performance, query performance, etc.
### CKibana {#ckibana}
[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI.
Features:
- Translates chart requests from the native Kibana UI into ClickHouse query syntax.
- Supports advanced features such as sampling and caching to enhance query performance.
- Minimizes the learning cost for users after migrating from ElasticSearch to ClickHouse.
## Commercial {#commercial}
### DataGrip {#datagrip}

View File

@ -6,7 +6,7 @@ import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.m
<SelfManaged />
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. The `subjectAltName extension` supports the use of one wildcard '*' in the server configuration. This makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.
To enable SSL certificate authentication, a list of `Common Name`'s or `Subject Alt Name`'s for each ClickHouse user must be specified in the settings file `users.xml `:
@ -30,6 +30,12 @@ To enable SSL certificate authentication, a list of `Common Name`'s or `Subject
</ssl_certificates>
<!-- Other settings -->
</user_name_2>
<user_name_3>
<ssl_certificates>
<!-- Wildcard support -->
<subject_alt_name>URI:spiffe://foo.com/*/bar</subject_alt_name>
</ssl_certificates>
</user_name_3>
</users>
</clickhouse>
```

View File

@ -1463,26 +1463,29 @@ Examples:
## logger {#logger}
Logging settings.
The location and format of log messages.
Keys:
- `level` Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
- `log` The log file. Contains all the entries according to `level`.
- `errorlog` Error log file.
- `size` Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` The number of archived log files that ClickHouse stores.
- `console` Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `console_log_level` Logging level for console. Default to `level`.
- `use_syslog` - Log to syslog as well.
- `syslog_level` - Logging level for logging to syslog.
- `stream_compress` Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` Specify log format to be printed in console log (currently only `json` supported).
- `level` Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`,
`debug`, `trace`, `test`.
- `log` The path to the log file.
- `errorlog` The path to the error log file.
- `size` Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
- `count` Rotation policy: The maximum number of historical log files ClickHouse keeps.
- `stream_compress` Compress log messages using LZ4. Set to `1` or `true` to enable.
- `console` Do not write log messages to log files; instead, print them to the console. Set to `1` or `true` to enable. Default is `1` if ClickHouse does not run in daemon mode, `0` otherwise.
- `console_log_level` Log level for console output. Defaults to `level`.
- `formatting` Log format for console output. Currently, only `json` is supported.
- `use_syslog` - Also forward log output to syslog.
- `syslog_level` - Log level for logging to syslog.
Both log and error log file names (only file names, not directories) support date and time format specifiers.
**Log format specifiers**
**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part does not support them).
Column “Example” shows the output at `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
</logger>
```
Writing to the console can be configured. Config example:
To print log messages only in the console:
``` xml
<logger>
<level>information</level>
<console>1</console>
<console>true</console>
</logger>
```
**Per-level Overrides**
The log level of individual loggers can be overridden. For example, to mute all messages from the loggers "Backup" and "RBAC":
```xml
<logger>
<levels>
<logger>
<name>Backup</name>
<level>none</level>
</logger>
<logger>
<name>RBAC</name>
<level>none</level>
</logger>
</levels>
</logger>
```
### syslog
Writing to the syslog is also supported. Config example:
To write log messages additionally to syslog:
``` xml
<logger>
@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
</logger>
```
Keys for syslog:
Keys for `<syslog>`:
- use_syslog — Required setting if you want to write to the syslog.
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
- hostname — Optional. The name of the host that logs are sent from.
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format Message format. Possible values: `bsd` and `syslog.`
- `address` — The address of syslog in format `host[:port]`. If omitted, the local daemon is used.
- `hostname` — The name of the host from which logs are sent. Optional.
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- `format` Log message format. Possible values: `bsd` and `syslog`.
### Log formats
@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren
"source_line": "192"
}
```
To enable JSON logging support, use the following snippet:
```xml

View File

@ -3226,7 +3226,7 @@ Default value: `0`.
## lightweight_deletes_sync {#lightweight_deletes_sync}
The same as 'mutation_sync', but controls only execution of lightweight deletes.
The same as [`mutations_sync`](#mutations_sync), but controls only the execution of lightweight deletes.
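A minimal usage sketch, assuming the same value semantics as `mutations_sync` (the table is illustrative):

```sql
-- Wait until the lightweight delete completes on the current replica
-- before returning (assumed semantics mirroring mutations_sync = 1).
SET lightweight_deletes_sync = 1;
DELETE FROM tab WHERE id = 42;
```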
Possible values:

View File

@ -499,7 +499,7 @@ Required parameters:
- `type``encrypted`. Otherwise the encrypted disk is not created.
- `disk` — Type of disk for data storage.
- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encode the key in hexadecimal form.
You can specify multiple keys using the `id` attribute (see example above).
You can specify multiple keys using the `id` attribute (see example below).
Optional parameters:

View File

@ -47,6 +47,8 @@ keeper foo bar
- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `cp '<src>' '<dest>'` -- Copies 'src' node to 'dest' path
- `mv '<src>' '<dest>'` -- Moves 'src' node to the 'dest' path
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value

View File

@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
**Parameters**
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
**Returned values**
@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
Type: `UInt8`.
<a name="sequence-function-pattern-syntax"></a>
**Pattern syntax**
#### Pattern syntax
- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.
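For example, a pattern that checks whether condition 1 is followed (not necessarily immediately) by condition 2 can be sketched over synthetic data like this:

```sql
-- Does an event with number = 1 occur before an event with number = 2?
SELECT sequenceMatch('(?1).*(?2)')(time, number = 1, number = 2)
FROM
(
    SELECT toDateTime('2020-01-01 00:00:00') + number AS time, number
    FROM numbers(10)
);
```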
@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)
**Parameters**
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
**Returned values**

View File

@ -0,0 +1,44 @@
---
slug: /en/sql-reference/aggregate-functions/reference/distinctdynamictypes
sidebar_position: 215
---
# distinctDynamicTypes
Calculates the list of distinct data types stored in a [Dynamic](../../data-types/dynamic.md) column.
**Syntax**
```sql
distinctDynamicTypes(dynamic)
```
**Arguments**
- `dynamic` — [Dynamic](../../data-types/dynamic.md) column.
**Returned Value**
- The sorted list of data type names ([Array(String)](../../data-types/array.md)).
**Example**
Query:
```sql
DROP TABLE IF EXISTS test_dynamic;
CREATE TABLE test_dynamic(d Dynamic) ENGINE = Memory;
INSERT INTO test_dynamic VALUES (42), (NULL), ('Hello'), ([1, 2, 3]), ('2020-01-01'), (map(1, 2)), (43), ([4, 5]), (NULL), ('World'), (map(3, 4))
```
```sql
SELECT distinctDynamicTypes(d) FROM test_dynamic;
```
Result:
```reference
┌─distinctDynamicTypes(d)──────────────────────────────────────┐
│ ['Array(Int64)','Date','Int64','Map(UInt8, UInt8)','String'] │
└──────────────────────────────────────────────────────────────┘
```

View File

@ -0,0 +1,125 @@
---
slug: /en/sql-reference/aggregate-functions/reference/distinctjsonpaths
sidebar_position: 216
---
# distinctJSONPaths
Calculates the list of distinct paths stored in a [JSON](../../data-types/newjson.md) column.
**Syntax**
```sql
distinctJSONPaths(json)
```
**Arguments**
- `json` — [JSON](../../data-types/newjson.md) column.
**Returned Value**
- The sorted list of paths. [Array(String)](../../data-types/array.md).
**Example**
Query:
```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
```
```sql
SELECT distinctJSONPaths(json) FROM test_json;
```
Result:
```reference
┌─distinctJSONPaths(json)───┐
│ ['a','b','c.d.e','c.d.f'] │
└───────────────────────────┘
```
# distinctJSONPathsAndTypes
Calculates the list of distinct paths and their types stored in a [JSON](../../data-types/newjson.md) column.
**Syntax**
```sql
distinctJSONPathsAndTypes(json)
```
**Arguments**
- `json` — [JSON](../../data-types/newjson.md) column.
**Returned Value**
- The sorted map of paths and types. [Map(String, Array(String))](../../data-types/map.md).
**Example**
Query:
```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
```
```sql
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
```
Result:
```reference
┌─distinctJSONPathsAndTypes(json)───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {'a':['Int64'],'b':['Array(Nullable(Int64))','String'],'c.d.e':['Date'],'c.d.f':['Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))']} │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
**Note**
If the JSON declaration contains paths with specified types, these paths will always be included in the result of the `distinctJSONPaths`/`distinctJSONPathsAndTypes` functions, even if the input data does not contain values for these paths.
```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON(a UInt32)) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"b" : "Hello"}'), ('{"b" : "World", "c" : [1, 2, 3]}');
```
```sql
SELECT json FROM test_json;
```
```text
┌─json──────────────────────────────────┐
│ {"a":0,"b":"Hello"} │
│ {"a":0,"b":"World","c":["1","2","3"]} │
└───────────────────────────────────────┘
```
```sql
SELECT distinctJSONPaths(json) FROM test_json;
```
```text
┌─distinctJSONPaths(json)─┐
│ ['a','b','c'] │
└─────────────────────────┘
```
```sql
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
```
```text
┌─distinctJSONPathsAndTypes(json)────────────────────────────────┐
│ {'a':['UInt32'],'b':['String'],'c':['Array(Nullable(Int64))']} │
└────────────────────────────────────────────────────────────────┘
```

View File

@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we
### During merges of data parts in MergeTree table engines
During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains
During a merge of several data parts in a MergeTree table, the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
In this case, ClickHouse chooses which paths will remain as subcolumns after the merge and which paths will be stored in the shared data structure. In most cases, ClickHouse tries to keep the paths that contain
the largest number of non-null values and to move the rarest paths to the shared data structure, but this depends on the implementation.
Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:
@ -505,7 +505,130 @@ As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and move
## Introspection functions
There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes).
There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes), [distinctDynamicTypes](../aggregate-functions/reference/distinctdynamictypes.md), and [distinctJSONPaths and distinctJSONPathsAndTypes](../aggregate-functions/reference/distinctjsonpaths.md).
**Examples**
Let's investigate the content of the [GH Archive](https://www.gharchive.org/) dataset for the date `2020-01-01`:
```sql
SELECT arrayJoin(distinctJSONPaths(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject)
```
```text
┌─arrayJoin(distinctJSONPaths(json))─────────────────────────┐
│ actor.avatar_url │
│ actor.display_login │
│ actor.gravatar_id │
│ actor.id │
│ actor.login │
│ actor.url │
│ created_at │
│ id │
│ org.avatar_url │
│ org.gravatar_id │
│ org.id │
│ org.login │
│ org.url │
│ payload.action │
│ payload.before │
│ payload.comment._links.html.href │
│ payload.comment._links.pull_request.href │
│ payload.comment._links.self.href │
│ payload.comment.author_association │
│ payload.comment.body │
│ payload.comment.commit_id │
│ payload.comment.created_at │
│ payload.comment.diff_hunk │
│ payload.comment.html_url │
│ payload.comment.id │
│ payload.comment.in_reply_to_id │
│ payload.comment.issue_url │
│ payload.comment.line │
│ payload.comment.node_id │
│ payload.comment.original_commit_id │
│ payload.comment.original_position │
│ payload.comment.path │
│ payload.comment.position │
│ payload.comment.pull_request_review_id │
...
│ payload.release.node_id │
│ payload.release.prerelease │
│ payload.release.published_at │
│ payload.release.tag_name │
│ payload.release.tarball_url │
│ payload.release.target_commitish │
│ payload.release.upload_url │
│ payload.release.url │
│ payload.release.zipball_url │
│ payload.size │
│ public │
│ repo.id │
│ repo.name │
│ repo.url │
│ type │
└─arrayJoin(distinctJSONPaths(json))─────────────────────────┘
```
```sql
SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject) SETTINGS date_time_input_format='best_effort'
```
```text
┌─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┐
│ ('actor.avatar_url',['String']) │
│ ('actor.display_login',['String']) │
│ ('actor.gravatar_id',['String']) │
│ ('actor.id',['Int64']) │
│ ('actor.login',['String']) │
│ ('actor.url',['String']) │
│ ('created_at',['DateTime']) │
│ ('id',['String']) │
│ ('org.avatar_url',['String']) │
│ ('org.gravatar_id',['String']) │
│ ('org.id',['Int64']) │
│ ('org.login',['String']) │
│ ('org.url',['String']) │
│ ('payload.action',['String']) │
│ ('payload.before',['String']) │
│ ('payload.comment._links.html.href',['String']) │
│ ('payload.comment._links.pull_request.href',['String']) │
│ ('payload.comment._links.self.href',['String']) │
│ ('payload.comment.author_association',['String']) │
│ ('payload.comment.body',['String']) │
│ ('payload.comment.commit_id',['String']) │
│ ('payload.comment.created_at',['DateTime']) │
│ ('payload.comment.diff_hunk',['String']) │
│ ('payload.comment.html_url',['String']) │
│ ('payload.comment.id',['Int64']) │
│ ('payload.comment.in_reply_to_id',['Int64']) │
│ ('payload.comment.issue_url',['String']) │
│ ('payload.comment.line',['Int64']) │
│ ('payload.comment.node_id',['String']) │
│ ('payload.comment.original_commit_id',['String']) │
│ ('payload.comment.original_position',['Int64']) │
│ ('payload.comment.path',['String']) │
│ ('payload.comment.position',['Int64']) │
│ ('payload.comment.pull_request_review_id',['Int64']) │
...
│ ('payload.release.node_id',['String']) │
│ ('payload.release.prerelease',['Bool']) │
│ ('payload.release.published_at',['DateTime']) │
│ ('payload.release.tag_name',['String']) │
│ ('payload.release.tarball_url',['String']) │
│ ('payload.release.target_commitish',['String']) │
│ ('payload.release.upload_url',['String']) │
│ ('payload.release.url',['String']) │
│ ('payload.release.zipball_url',['String']) │
│ ('payload.size',['Int64']) │
│ ('public',['Bool']) │
│ ('repo.id',['Int64']) │
│ ('repo.name',['String']) │
│ ('repo.url',['String']) │
│ ('type',['String']) │
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```
## Tips for better usage of the JSON type

View File

@ -2035,6 +2035,7 @@ Query:
SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]);
```
Result:
``` text
@ -2043,6 +2044,43 @@ Result:
└──────────────────────────────────────┘
```
## arrayZipUnaligned
Combines multiple arrays into a single array, allowing for unaligned arrays. The resulting array contains the corresponding elements of the source arrays grouped into tuples in the listed order of arguments.
**Syntax**
``` sql
arrayZipUnaligned(arr1, arr2, ..., arrN)
```
**Arguments**
- `arrN` — [Array](../data-types/array.md).
The function can take any number of arrays of different types.
**Returned value**
- Array with elements from the source arrays grouped into [tuples](../data-types/tuple.md). Data types in the tuple are the same as types of the input arrays and in the same order as arrays are passed. [Array](../data-types/array.md). If the arrays have different sizes, the shorter arrays will be padded with `null` values.
**Example**
Query:
``` sql
SELECT arrayZipUnaligned(['a'], [1, 2, 3]);
```
Result:
``` text
┌─arrayZipUnaligned(['a'], [1, 2, 3])─┐
│ [('a',1),(NULL,2),(NULL,3)] │
└─────────────────────────────────────┘
```
## arrayAUC
Calculates the AUC (Area Under the Curve), a concept in machine learning; see <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve> for more details.

View File

@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time:
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
**See Also**
**Syntax**
```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```
The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function, e.g.
``` SQL
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
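As a sketch of the bucket arithmetic (the expected values below are hand-computed, not captured server output): with buckets anchored at the origin `14:35:30`, one-minute buckets begin at 30 seconds past each minute, so `14:45:00` maps to `14:44:30`; with fifteen-minute buckets, it maps back to the origin itself:
```sql
-- buckets are aligned to the origin rather than to the epoch
SELECT
    toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30')) AS one_min,      -- expected: 2023-01-01 14:44:30
    toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 15 MINUTE, toDateTime('2023-01-01 14:35:30')) AS fifteen_min; -- expected: 2023-01-01 14:35:30
```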
**See Also**
- [date_trunc](#date_trunc)
## toTime
Converts a date with time to a certain fixed date, while preserving the time.
**Syntax**
```sql
toTime(date[,timezone])
```
**Arguments**
- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md).
**Returned value**
- DateTime with date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md).
:::note
If the `date` input argument contained sub-second components,
they will be dropped in the returned `DateTime` value with second-accuracy.
:::
**Example**
Query:
```sql
SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000',3)) AS result, toTypeName(result);
```
Result:
```response
┌──────────────result─┬─toTypeName(result)─┐
│ 1970-01-02 01:20:30 │ DateTime │
└─────────────────────┴────────────────────┘
```
## toRelativeYearNum
Converts a date, or date with time, to the number of the year, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeYearNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeYearNum(toDate('2002-12-08')) AS y1,
toRelativeYearNum(toDate('2010-10-26')) AS y2
```
Result:
```response
┌───y1─┬───y2─┐
│ 2002 │ 2010 │
└──────┴──────┘
```
## toRelativeQuarterNum
Converts a date, or date with time, to the number of the quarter, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeQuarterNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeQuarterNum(toDate('1993-11-25')) AS q1,
toRelativeQuarterNum(toDate('2005-01-05')) AS q2
```
Result:
```response
┌───q1─┬───q2─┐
│ 7975 │ 8020 │
└──────┴──────┘
```
## toRelativeMonthNum
Converts a date, or date with time, to the number of the month, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeMonthNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeMonthNum(toDate('2001-04-25')) AS m1,
toRelativeMonthNum(toDate('2009-07-08')) AS m2
```
Result:
```response
┌────m1─┬────m2─┐
│ 24016 │ 24115 │
└───────┴───────┘
```
## toRelativeWeekNum
Converts a date, or date with time, to the number of the week, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeWeekNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeWeekNum(toDate('2000-02-29')) AS w1,
toRelativeWeekNum(toDate('2001-01-12')) AS w2
```
Result:
```response
┌───w1─┬───w2─┐
│ 1574 │ 1619 │
└──────┴──────┘
```
## toRelativeDayNum
Converts a date, or date with time, to the number of the day, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeDayNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeDayNum(toDate('1993-10-05')) AS d1,
toRelativeDayNum(toDate('2000-09-20')) AS d2
```
Result:
```response
┌───d1─┬────d2─┐
│ 8678 │ 11220 │
└──────┴───────┘
```
## toRelativeHourNum
Converts a date, or date with time, to the number of the hour, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeHourNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1,
toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2
```
Result:
```response
┌─────h1─┬─────h2─┐
│ 208276 │ 269292 │
└────────┴────────┘
```
## toRelativeMinuteNum
Converts a date, or date with time, to the number of the minute, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeMinuteNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1,
toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2
```
Result:
```response
┌───────m1─┬───────m2─┐
│ 12496580 │ 16157531 │
└──────────┴──────────┘
```
## toRelativeSecondNum
Converts a date, or date with time, to the number of the second, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of seconds elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeSecondNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1,
toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2
```
Result:
```response
┌────────s1─┬────────s2─┐
│ 749794836 │ 969451889 │
└───────────┴───────────┘
```
## toISOYear
@ -2019,7 +2322,7 @@ Alias: `dateTrunc`.
`unit` argument is case-insensitive.
- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).
**Returned value**
@ -3884,19 +4187,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots
For a time interval starting at StartTime and continuing for Duration seconds, it returns an array of moments in time, consisting of points from this interval rounded down to a multiple of Size seconds. Size is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as the StartTime argument. For DateTime, the Duration and Size arguments must be `UInt32`. For DateTime64, they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (the return type matches the type of StartTime). For DateTime64, the return value's scale can differ from the scale of StartTime: the highest scale among all given arguments is taken.
Example:
**Syntax**
```sql
timeSlots(StartTime, Duration[, Size])
```
**Example**
```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```
Result:
``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │

View File

@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])
**Parameters**
- `input`: A string type [String](../data-types/string.md).
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.
**Returned value**
@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])
**Example**
```sql
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```
Result:
```text
┌─res─────────────┐
│ ClickHouse CORE │
└─────────────────┘
┌─res───────────────────────┐
│ My mother is from Mexico. │
└───────────────────────────┘
```
```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```
Result:
```text
┌─res────────────────────┐
│ My dad is from Mexico. │
└────────────────────────┘
```
## overlayUTF8
Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`.
Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.
**Syntax**
@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.
**Returned value**
@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])
**Example**
```sql
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```
Result:
```text
┌─res────────────────────────┐
ClickHouse是开源OLAP数据库
└────────────────────────────┘
┌─res────────────────────────────┐
│ Mein Vater ist aus der Türkei. │
└────────────────────────────────┘
```
## replaceOne

View File

@ -49,6 +49,55 @@ SETTINGS cast_keep_nullable = 1
└──────────────────┴─────────────────────┴──────────────────┘
```
## toBool
Converts an input value to a value of type [`Bool`](../data-types/boolean.md). Throws an exception in case of an error.
**Syntax**
```sql
toBool(expr)
```
**Arguments**
- `expr` — Expression returning a number or a string. [Expression](../syntax.md/#syntax-expressions).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- Values of type Float32/64.
- Strings `true` or `false` (case-insensitive).
**Returned value**
- Returns `true` or `false` based on evaluation of the argument. [Bool](../data-types/boolean.md).
**Example**
Query:
```sql
SELECT
toBool(toUInt8(1)),
toBool(toInt8(-1)),
toBool(toFloat32(1.01)),
toBool('true'),
toBool('false'),
toBool('FALSE')
FORMAT Vertical
```
Result:
```response
toBool(toUInt8(1)): true
toBool(toInt8(-1)): true
toBool(toFloat32(1.01)): true
toBool('true'): true
toBool('false'): false
toBool('FALSE'): false
```
## toInt8
Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.
@ -3857,7 +3906,7 @@ Result:
## toDateTime64
Converts the argument to the [DateTime64](../data-types/datetime64.md) data type.
Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).
**Syntax**
@ -3869,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` - Time zone of the specified datetime64 object.
- `timezone` (optional) - Time zone of the specified datetime64 object.
**Returned value**
@ -3928,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN
## toDateTime64OrZero
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the min value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrZero(expr, scale, [timezone])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
**Returned value**
- A calendar date and time of day, with sub-second precision, if the conversion succeeds; otherwise the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
```sql
SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
```
Result:
```response
┌─────────────invalid_arg─┐
│ 1970-01-01 01:00:00.000 │
└─────────────────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrNull](#todatetime64ornull).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrNull
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrNull(expr, scale, [timezone])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
**Returned value**
- A calendar date and time of day, with sub-second precision, if the conversion succeeds; otherwise `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).
**Example**
Query:
```sql
SELECT
toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
```
Result:
```response
┌───────────────valid_arg─┬─invalid_arg─┐
│ 1976-10-18 00:00:00.300 │ ᴺᵁᴸᴸ │
└─────────────────────────┴─────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrDefault
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
but returns either the minimum value of [DateTime64](../data-types/datetime64.md)
or the provided `default` value if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrDefault(expr, scale, [timezone, default])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).
**Returned value**
- A calendar date and time of day, with sub-second precision, if the conversion succeeds; otherwise the minimum value of `DateTime64`, or the `default` value if provided. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
```sql
SELECT
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
```
Result:
```response
┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
│ 1970-01-01 01:00:00.000 │ 2000-12-31 23:00:00.000 │
└─────────────────────────┴──────────────────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrNull](#todatetime64ornull).
## toDecimal32
Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.

View File

@ -265,8 +265,6 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL '4' day + INTERV
└─────────────────────┴────────────────────────────────────────────────────────────┘
```
You can work with dates without using `INTERVAL`, just by adding or subtracting seconds, minutes, and hours. For example, an interval of one day can be set by adding `60*60*24`.
:::note
The `INTERVAL` syntax or the `addDays` function is always preferred. Simple addition or subtraction (syntax like `now() + ...`) does not take time settings such as daylight saving time into account.
:::
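For example, a minimal sketch of the preferred form versus plain arithmetic:
```sql
SELECT now() + INTERVAL 1 DAY;  -- preferred: respects time zone rules such as daylight saving time
-- instead of: SELECT now() + 60*60*24;
```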

View File

@ -351,7 +351,7 @@ ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;
You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:
- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH/ATTACH FROM. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#attach-partitionpart) and [DROP DETACHED PART](#drop-detached-partitionpart) query, to specify the name of a part, use string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.

View File

@ -10,10 +10,10 @@ title: The Lightweight DELETE Statement
The lightweight `DELETE` statement removes rows from the table `[db.]table` that match the expression `expr`. It is only available for the *MergeTree table engine family.
``` sql
DELETE FROM [db.]table [ON CLUSTER cluster] WHERE expr;
DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
```
It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
It is called "lightweight `DELETE`" to contrast it to the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
## Examples
@ -22,23 +22,27 @@ It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/
DELETE FROM hits WHERE Title LIKE '%hello%';
```
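The optional `IN PARTITION` clause from the syntax above restricts the delete to a single partition; a sketch, with a hypothetical partition value `201901`:
```sql
DELETE FROM hits IN PARTITION 201901 WHERE Title LIKE '%hello%';
```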
## Lightweight `DELETE` does not delete data from storage immediately
## Lightweight `DELETE` does not delete data immediately
With lightweight `DELETE`, deleted rows are internally marked as deleted immediately and will be automatically filtered out of all subsequent queries. However, cleanup of data happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations) that marks rows as deleted but does not immediately physically delete them.
If you need to guarantee that your data is deleted from storage in a predictable time, consider using the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER table DELETE` may consume significant resources as it recreates all affected parts.
By default, `DELETE` statements wait until marking the rows as deleted is completed before returning. This can take a long time if the amount of data is large. Alternatively, you can run it asynchronously in the background using the setting [`lightweight_deletes_sync`](/en/operations/settings/settings#lightweight_deletes_sync). If disabled, the `DELETE` statement will return immediately, but the data can still be visible to queries until the background mutation is finished.
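For example, a minimal sketch of an asynchronous delete using that setting:
```sql
DELETE FROM hits WHERE Title LIKE '%hello%'
SETTINGS lightweight_deletes_sync = 0;
```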
The mutation does not physically delete the rows that have been marked as deleted; this only happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
If you need to guarantee that your data is deleted from storage in a predictable time, consider using the table setting [`min_age_to_force_merge_seconds`](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Or you can use the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER TABLE ... DELETE` may consume significant resources as it recreates all affected parts.
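A sketch of enabling that table setting (the one-hour value is illustrative only):
```sql
ALTER TABLE hits MODIFY SETTING min_age_to_force_merge_seconds = 3600;
```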
## Deleting large amounts of data
Large deletes can negatively affect ClickHouse performance. If you are attempting to delete all rows from a table, consider using the [`TRUNCATE TABLE`](/en/sql-reference/statements/truncate) command.
If you anticipate frequent deletes, consider using a [custom partitioning key](/en/engines/table-engines/mergetree-family/custom-partitioning-key). You can then use the [`ALTER TABLE...DROP PARTITION`](/en/sql-reference/statements/alter/partition#drop-partitionpart) command to quickly drop all rows associated with that partition.
If you anticipate frequent deletes, consider using a [custom partitioning key](/en/engines/table-engines/mergetree-family/custom-partitioning-key). You can then use the [`ALTER TABLE ... DROP PARTITION`](/en/sql-reference/statements/alter/partition#drop-partitionpart) command to quickly drop all rows associated with that partition.
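For instance, assuming a table partitioned by `toYYYYMM(EventDate)`:
```sql
ALTER TABLE hits DROP PARTITION 201901;
```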
## Limitations of lightweight `DELETE`
### Lightweight `DELETE`s with projections
By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` can change the behavior.
By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` to change the behavior.
## Performance considerations when using lightweight `DELETE`
@ -48,7 +52,7 @@ The following can also negatively impact lightweight `DELETE` performance:
- A heavy `WHERE` condition in a `DELETE` query.
- If the mutations queue is filled with many other mutations, this can lead to performance issues as all mutations on a table are executed sequentially.
- The affected table having a very large number of data parts.
- The affected table has a very large number of data parts.
- Having a lot of data in compact parts. In a Compact part, all columns are stored in one file.
## Delete permissions
@ -61,31 +65,31 @@ GRANT ALTER DELETE ON db.table to username;
## How lightweight DELETEs work internally in ClickHouse
1. A "mask" is applied to affected rows
1. **A "mask" is applied to affected rows**
When a `DELETE FROM table ...` query is executed, ClickHouse saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows are omitted for subsequent queries. However, rows are actually only removed later by subsequent merges. Writing this mask is much more lightweight than what is done by an `ALTER table DELETE` query.
When a `DELETE FROM table ...` query is executed, ClickHouse saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows are omitted for subsequent queries. However, rows are actually only removed later by subsequent merges. Writing this mask is much more lightweight than what is done by an `ALTER TABLE ... DELETE` query.
The mask is implemented as a hidden `_row_exists` system column that stores `True` for all visible rows and `False` for deleted ones. This column is only present in a part if some rows in the part were deleted. This column does not exist when a part has all values equal to `True`.
The mask is implemented as a hidden `_row_exists` system column that stores `True` for all visible rows and `False` for deleted ones. This column is only present in a part if some rows in the part were deleted. This column does not exist when a part has all values equal to `True`.
2. `SELECT` queries are transformed to include the mask
2. **`SELECT` queries are transformed to include the mask**
When a masked column is used in a query, the `SELECT ... FROM table WHERE condition` query internally is extended by the predicate on `_row_exists` and is transformed to:
```sql
SELECT ... FROM table PREWHERE _row_exists WHERE condition
```
At execution time, the column `_row_exists` is read to determine which rows should not be returned. If there are many deleted rows, ClickHouse can determine which granules can be fully skipped when reading the rest of the columns.
When a masked column is used in a query, the `SELECT ... FROM table WHERE condition` query internally is extended by the predicate on `_row_exists` and is transformed to:
```sql
SELECT ... FROM table PREWHERE _row_exists WHERE condition
```
At execution time, the column `_row_exists` is read to determine which rows should not be returned. If there are many deleted rows, ClickHouse can determine which granules can be fully skipped when reading the rest of the columns.
3. `DELETE` queries are transformed to `ALTER table UPDATE` queries
3. **`DELETE` queries are transformed to `ALTER TABLE ... UPDATE` queries**
The `DELETE FROM table WHERE condition` is translated into an `ALTER table UPDATE _row_exists = 0 WHERE condition` mutation.
The `DELETE FROM table WHERE condition` is translated into an `ALTER TABLE table UPDATE _row_exists = 0 WHERE condition` mutation.
Internally, this mutation is executed in two steps:
Internally, this mutation is executed in two steps:
1. A `SELECT count() FROM table WHERE condition` command is executed for each individual part to determine if the part is affected.
1. A `SELECT count() FROM table WHERE condition` command is executed for each individual part to determine if the part is affected.
2. Based on the commands above, affected parts are then mutated, and hardlinks are created for unaffected parts. In the case of wide parts, the `_row_exists` column for each row is updated and all other columns' files are hardlinked. For compact parts, all columns are re-written because they are all stored together in one file.
2. Based on the commands above, affected parts are then mutated, and hardlinks are created for unaffected parts. In the case of wide parts, the `_row_exists` column for each row is updated, and all other columns' files are hardlinked. For compact parts, all columns are re-written because they are all stored together in one file.
From the steps above, we can see that lightweight deletes using the masking technique improves performance over traditional `ALTER table DELETE` commands because `ALTER table DELETE` reads and re-writes all the columns' files for affected parts.
From the steps above, we can see that lightweight `DELETE` using the masking technique improves performance over traditional `ALTER TABLE ... DELETE` because it does not re-write all the columns' files for affected parts.
## Related content

View File

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:
Subquery is another `SELECT` query that may be specified in parenthesis inside `FROM` clause.
`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
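For example, the comma-separated form below is equivalent to an explicit `CROSS JOIN` (with hypothetical tables `t1` and `t2`):
```sql
SELECT * FROM t1, t2;
-- equivalent to:
SELECT * FROM t1 CROSS JOIN t2;
```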
`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:
```sql
FROM table
SELECT *
```
## FINAL Modifier
@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu
### Example Usage
**Using the `FINAL` keyword**
Using the `FINAL` keyword
```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```
**Using `FINAL` as a query-level setting**
Using `FINAL` as a query-level setting
```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```
**Using `FINAL` as a session-level setting**
Using `FINAL` as a session-level setting
```sql
SET final = 1;

View File

@ -8,14 +8,14 @@ slug: /en/guides/developer/transactional
This is transactional (ACID) if the inserted rows are packed and inserted as a single block (see Notes):
- Atomic: an INSERT succeeds or is rejected as a whole: if a confirmation is sent to the client, then all rows were inserted; if an error is sent to the client, then no rows were inserted.
- Consistent: if there are no table constraints violated, then all rows in an INSERT are inserted and the INSERT succeeds; if constraints are violated, then no rows are inserted.
- Isolated: concurrent clients observe a consistent snapshot of the tablethe state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen
- Isolated: concurrent clients observe a consistent snapshot of the table: the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen. Clients inside another transaction have [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation), while clients outside of a transaction have the [read uncommitted](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Read_uncommitted) isolation level.
- Durable: a successful INSERT is written to the filesystem before answering to the client, on a single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
- INSERT into multiple tables with one statement is possible if materialized views are involved (the INSERT from the client is to a table which has associated materialized views).
## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family
Same as Case 1 above, with this detail:
- If table has many partitions and INSERT covers many partitionsthen insertion into every partition is transactional on its own
- If a table has many partitions and an INSERT covers many partitions, then insertion into every partition is transactional on its own
## Case 3: INSERT into one distributed table of the MergeTree* family
@ -38,7 +38,7 @@ Same as Case 1 above, with this detail:
- the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
- ClickHouse is using MVCC with snapshot isolation internally
- ClickHouse uses [MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) with [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) internally for concurrent transactions
- all ACID properties are valid even in the case of server kill/crash
- either `insert_quorum` across different availability zones or `fsync` should be enabled to ensure durable inserts in the typical setup
- "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
@ -260,7 +260,7 @@ FROM mergetree_table
### Transactions introspection
You can inspect transactions by querying the `system.transactions` table, but note that you cannot query that
table from a session that is in a transactionopen a second `clickhouse client` session to query that table.
table from a session that is in a transaction. Open a second `clickhouse client` session to query that table.
```sql
SELECT *

View File

@ -50,7 +50,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
1
```
@ -367,7 +367,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0", "elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -601,7 +601,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -659,7 +659,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -678,7 +678,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -53,7 +53,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
1
```
@ -363,7 +363,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -524,7 +524,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@ -564,7 +564,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -616,7 +616,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -635,7 +635,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -677,4 +677,122 @@ void GetAllChildrenNumberCommand::execute(const ASTKeeperQuery * query, KeeperCl
std::cout << totalNumChildren << "\n";
}
namespace
{
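/// Performs a copy or move of a Keeper node as a single multi-request:
/// check the source version, create the destination with the source's data,
/// and (for a move) remove the source. Retried by the caller on version conflicts.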
class CPMVOperation
{
constexpr static UInt64 kTryLimit = 1000;
public:
CPMVOperation(String src_, String dest_, bool remove_src_, KeeperClient * client_)
: src(std::move(src_)), dest(std::move(dest_)), remove_src(remove_src_), client(client_)
{
}
bool isTryLimitReached() const { return failed_tries_count >= kTryLimit; }
bool isCompleted() const { return is_completed; }
void perform()
{
Coordination::Stat src_stat;
String data = client->zookeeper->get(src, &src_stat);
Coordination::Requests ops{
zkutil::makeCheckRequest(src, src_stat.version),
zkutil::makeCreateRequest(dest, data, zkutil::CreateMode::Persistent), // Do we need to copy ACLs here?
};
if (remove_src)
ops.push_back(zkutil::makeRemoveRequest(src, src_stat.version));
Coordination::Responses responses;
auto code = client->zookeeper->tryMulti(ops, responses);
switch (code)
{
case Coordination::Error::ZOK: {
is_completed = true;
return;
}
case Coordination::Error::ZBADVERSION: {
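// The source node changed between get() and tryMulti(); count the conflict so the caller can retry up to kTryLimit.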
++failed_tries_count;
if (isTryLimitReached())
zkutil::KeeperMultiException::check(code, ops, responses);
return;
}
default:
zkutil::KeeperMultiException::check(code, ops, responses);
}
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable");
}
private:
String src;
String dest;
bool remove_src = false;
KeeperClient * client = nullptr;
bool is_completed = false;
uint64_t failed_tries_count = 0;
};
}
bool CPCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, [[maybe_unused]] Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));
String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));
return true;
}
void CPCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/false, /*client_=*/client);
while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}
bool MVCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));
String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));
return true;
}
void MVCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/true, /*client_=*/client);
while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}
}
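A hedged usage sketch of the two commands added above (hypothetical interactive keeper-client session; the paths are illustrative):

cp /data/source /backup/source   -- copies the node, retrying up to the 1000-try limit on concurrent version changes
mv /staging/node /prod/node      -- same check-and-create semantics, with the source removed in the same multi request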

View File

@ -266,4 +266,32 @@ class GetAllChildrenNumberCommand : public IKeeperClientCommand
}
};
class CPCommand : public IKeeperClientCommand
{
String getName() const override { return "cp"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} <src> <dest> -- Copies 'src' node to 'dest' path.";
}
};
class MVCommand : public IKeeperClientCommand
{
String getName() const override { return "mv"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} <src> <dest> -- Moves 'src' node to the 'dest' path.";
}
};
}

View File

@ -212,6 +212,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
std::make_shared<FourLetterWordCommand>(),
std::make_shared<GetDirectChildrenNumberCommand>(),
std::make_shared<GetAllChildrenNumberCommand>(),
std::make_shared<CPCommand>(),
std::make_shared<MVCommand>(),
});
String home_path;

View File

@ -11,6 +11,7 @@
#include <Core/ServerUUID.h>
#include <Common/logger_useful.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/MemoryWorker.h>
#include <Common/ErrorHandlers.h>
#include <Common/assertProcessUserMatchesDataOwner.h>
#include <Common/makeSocketAddress.h>
@ -384,6 +385,9 @@ try
LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
});
MemoryWorker memory_worker(config().getUInt64("memory_worker_period_ms", 0));
memory_worker.start();
static ServerErrorHandler error_handler;
Poco::ErrorHandler::set(&error_handler);
@ -425,8 +429,9 @@ try
for (const auto & server : *servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
return metrics;
}
);
},
/*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
/*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);
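/// A hedged reading of the two new flags above (interpretation, not part of the diff):
/// update_jemalloc_epoch_ -- AsynchronousMetrics should bump jemalloc's epoch itself only when
///                           MemoryWorker is not already using jemalloc as its memory usage source.
/// update_rss_            -- AsynchronousMetrics should keep updating MemoryTracker's RSS only when
///                           MemoryWorker has no memory usage source at all.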
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
@ -655,7 +660,6 @@ try
GWPAsan::initFinished();
#endif
LOG_INFO(log, "Ready for connections.");
waitForTerminationRequest();

View File

@ -11,7 +11,6 @@
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <Poco/Config.h>
#include <Common/Jemalloc.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/phdr_cache.h>
@ -25,6 +24,7 @@
#include <base/Numa.h>
#include <Common/PoolId.h>
#include <Common/MemoryTracker.h>
#include <Common/MemoryWorker.h>
#include <Common/ClickHouseRevision.h>
#include <Common/DNSResolver.h>
#include <Common/CgroupsMemoryUsageObserver.h>
@ -111,6 +111,8 @@
#include <filesystem>
#include <unordered_set>
#include <Common/Jemalloc.h>
#include "config.h"
#include <Common/config_version.h>
@ -449,9 +451,12 @@ void checkForUsersNotInMainConfig(
}
}
namespace
{
/// Unused in other builds
#if defined(OS_LINUX)
static String readLine(const String & path)
String readLine(const String & path)
{
ReadBufferFromFile in(path);
String contents;
@ -459,7 +464,7 @@ static String readLine(const String & path)
return contents;
}
static int readNumber(const String & path)
int readNumber(const String & path)
{
ReadBufferFromFile in(path);
int result;
@ -469,7 +474,7 @@ static int readNumber(const String & path)
#endif
static void sanityChecks(Server & server)
void sanityChecks(Server & server)
{
std::string data_path = getCanonicalPath(server.config().getString("path", DBMS_DEFAULT_PATH));
std::string logs_path = server.config().getString("logger.log", "");
@ -590,6 +595,8 @@ static void sanityChecks(Server & server)
}
}
}
void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log)
{
try
@ -906,6 +913,8 @@ try
LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
});
MemoryWorker memory_worker(global_context->getServerSettings().memory_worker_period_ms);
/// This object will periodically calculate some metrics.
ServerAsynchronousMetrics async_metrics(
global_context,
@ -924,8 +933,9 @@ try
for (const auto & server : servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
return metrics;
}
);
},
/*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
/*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);
/// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown()
/// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads.
@ -1204,6 +1214,8 @@ try
FailPointInjection::enableFromGlobalConfig(config());
memory_worker.start();
int default_oom_score = 0;
#if !defined(NDEBUG)
@ -1547,15 +1559,6 @@ try
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
if (cgroups_memory_usage_observer)
{
double hard_limit_ratio = new_server_settings.cgroup_memory_watcher_hard_limit_ratio;
double soft_limit_ratio = new_server_settings.cgroup_memory_watcher_soft_limit_ratio;
cgroups_memory_usage_observer->setMemoryUsageLimits(
static_cast<uint64_t>(max_server_memory_usage * hard_limit_ratio),
static_cast<uint64_t>(max_server_memory_usage * soft_limit_ratio));
}
size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit;
size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
@ -1584,8 +1587,6 @@ try
background_memory_tracker.setDescription("(background)");
background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);
total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory);
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);

View File

@ -239,15 +239,36 @@ bool Authentication::areCredentialsValid(
throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());
case AuthenticationType::SSL_CERTIFICATE:
{
for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
{
for (const auto & subject : auth_data.getSSLCertificateSubjects().at(type))
{
if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
return true;
// Wildcard support (a single '*' only)
if (subject.contains('*'))
{
auto prefix = std::string_view(subject).substr(0, subject.find('*'));
auto suffix = std::string_view(subject).substr(subject.find('*') + 1);
auto slashes = std::count(subject.begin(), subject.end(), '/');
for (const auto & certificate_subject : ssl_certificate_credentials->getSSLCertificateSubjects().at(type))
{
bool matches_wildcard = certificate_subject.starts_with(prefix) && certificate_subject.ends_with(suffix);
// '*' must not represent a '/' in a URI, so check that the numbers of '/' are equal
bool matches_slashes = slashes == std::count(certificate_subject.begin(), certificate_subject.end(), '/');
if (matches_wildcard && matches_slashes)
return true;
}
}
}
}
return false;
}
case AuthenticationType::SSH_KEY:
#if USE_SSH

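A self-contained sketch of the matching rule above (hypothetical helper, not part of the diff; it assumes at most one '*' per subject, per the comment in the hunk):

#include <algorithm>
#include <cassert>
#include <string_view>

/// Illustrative only: true if `subject` matches `pattern`, where `pattern`
/// may contain a single '*' that must not swallow any '/'.
static bool matchWildcardSubject(std::string_view pattern, std::string_view subject)
{
    const auto star = pattern.find('*');
    if (star == std::string_view::npos)
        return pattern == subject;
    const auto prefix = pattern.substr(0, star);
    const auto suffix = pattern.substr(star + 1);
    if (subject.size() < prefix.size() + suffix.size())
        return false;
    const bool matches_wildcard = subject.starts_with(prefix) && subject.ends_with(suffix);
    /// '*' must not represent a '/' in a URI, so the '/' counts must be equal.
    const bool matches_slashes
        = std::count(pattern.begin(), pattern.end(), '/') == std::count(subject.begin(), subject.end(), '/');
    return matches_wildcard && matches_slashes;
}

int main()
{
    assert(matchWildcardSubject("URI:spiffe://foo/*/bar", "URI:spiffe://foo/baz/bar"));
    assert(!matchWildcardSubject("URI:spiffe://foo/*", "URI:spiffe://foo/a/b"));
}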
View File

@ -68,7 +68,10 @@ public:
if (data().isEqualTo(to.data()))
counter += to.counter;
else if (!data().has() || counter < to.counter)
{
data().set(to.data(), arena);
counter = to.counter - counter;
}
else
counter -= to.counter;
}
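A worked example of this merge rule (illustrative values; the scheme is a Boyer-Moore majority-vote style counter):

merging to = {value: y, counter: 5} into this = {value: x, counter: 3}
  -> values differ and 3 < 5, so this adopts y and its counter becomes 5 - 3 = 2
merging to = {value: x, counter: 5} into this = {value: x, counter: 3}
  -> values are equal, so the counters simply add up to 8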

View File

@ -0,0 +1,161 @@
#include <unordered_set>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Columns/ColumnDynamic.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int TOO_LARGE_ARRAY_SIZE;
}
struct AggregateFunctionDistinctDynamicTypesData
{
constexpr static size_t MAX_ARRAY_SIZE = 0xFFFFFF;
std::unordered_set<String> data;
void add(const String & type)
{
data.insert(type);
}
void merge(const AggregateFunctionDistinctDynamicTypesData & other)
{
data.insert(other.data.begin(), other.data.end());
}
void serialize(WriteBuffer & buf) const
{
writeVarUInt(data.size(), buf);
for (const auto & type : data)
writeStringBinary(type, buf);
}
void deserialize(ReadBuffer & buf)
{
size_t size;
readVarUInt(size, buf);
if (size > MAX_ARRAY_SIZE)
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", MAX_ARRAY_SIZE, size);
data.reserve(size);
String type;
for (size_t i = 0; i != size; ++i)
{
readStringBinary(type, buf);
data.insert(type);
}
}
void insertResultInto(IColumn & column)
{
/// Insert types in sorted order for better output.
auto & array_column = assert_cast<ColumnArray &>(column);
auto & string_column = assert_cast<ColumnString &>(array_column.getData());
std::vector<String> sorted_data(data.begin(), data.end());
std::sort(sorted_data.begin(), sorted_data.end());
for (const auto & type : sorted_data)
string_column.insertData(type.data(), type.size());
array_column.getOffsets().push_back(string_column.size());
}
};
/// Calculates the list of distinct data types in Dynamic column.
class AggregateFunctionDistinctDynamicTypes final : public IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>
{
public:
explicit AggregateFunctionDistinctDynamicTypes(const DataTypes & argument_types_)
: IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>(argument_types_, {}, std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()))
{
}
String getName() const override { return "distinctDynamicTypes"; }
bool allocatesMemoryInArena() const override { return false; }
void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{
const auto & dynamic_column = assert_cast<const ColumnDynamic & >(*columns[0]);
if (dynamic_column.isNullAt(row_num))
return;
data(place).add(dynamic_column.getTypeNameAt(row_num));
}
void ALWAYS_INLINE addBatchSinglePlace(
size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
const override
{
if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
IAggregateFunctionDataHelper::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
/// Optimization for case when we add all rows from the column into single place.
/// In this case we can avoid iterating over all rows because we can get all types
/// in Dynamic column in a more efficient way.
else
assert_cast<const ColumnDynamic & >(*columns[0]).getAllTypeNamesInto(data(place).data);
}
void addManyDefaults(
AggregateDataPtr __restrict /*place*/,
const IColumn ** /*columns*/,
size_t /*length*/,
Arena * /*arena*/) const override
{
/// Default value for Dynamic is NULL, so nothing to add.
}
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
data(place).merge(data(rhs));
}
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
data(place).serialize(buf);
}
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
data(place).deserialize(buf);
}
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
data(place).insertResultInto(to);
}
};
AggregateFunctionPtr createAggregateFunctionDistinctDynamicTypes(
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
if (argument_types.size() != 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Incorrect number of arguments for aggregate function {}. Expected single argument with type Dynamic, got {} arguments", name, argument_types.size());
if (!isDynamic(argument_types[0]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type Dynamic", argument_types[0]->getName(), name);
return std::make_shared<AggregateFunctionDistinctDynamicTypes>(argument_types);
}
void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory)
{
factory.registerFunction("distinctDynamicTypes", createAggregateFunctionDistinctDynamicTypes);
}
}
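A hedged usage sketch (hypothetical table t with a Dynamic column d): SELECT distinctDynamicTypes(d) FROM t returns the distinct type names as a sorted Array(String), e.g. ['Int64', 'String']. NULL rows contribute nothing, so an all-NULL column yields an empty array, and when a whole column feeds one aggregation state the addBatchSinglePlace fast path reads the type set directly from the column instead of iterating over rows.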

View File

@ -0,0 +1,350 @@
#include <unordered_set>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Columns/ColumnDynamic.h>
#include <Columns/ColumnObject.h>
#include <Columns/ColumnMap.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int TOO_LARGE_ARRAY_SIZE;
}
constexpr static size_t DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE = 0xFFFFFF;
struct AggregateFunctionDistinctJSONPathsData
{
static constexpr auto name = "distinctJSONPaths";
std::unordered_set<String> data;
void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> &)
{
for (const auto & [path, _] : column.getTypedPaths())
data.insert(path);
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
{
/// Add path from dynamic paths only if it's not NULL in this row.
if (!dynamic_column->isNullAt(row_num))
data.insert(path);
}
/// Iterate over paths in shared data in this row.
const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
const auto & shared_data_offsets = column.getSharedDataOffsets();
const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
for (size_t i = start; i != end; ++i)
data.insert(shared_data_paths->getDataAt(i).toString());
}
void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> &)
{
for (const auto & [path, _] : column.getTypedPaths())
data.insert(path);
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
{
/// Add dynamic path only if it has at least one non-null value.
/// getNumberOfDefaultRows for Dynamic column is O(1).
if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
data.insert(path);
}
/// Iterate over all paths in shared data.
const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
for (size_t i = 0; i != shared_data_paths->size(); ++i)
data.insert(shared_data_paths->getDataAt(i).toString());
}
void merge(const AggregateFunctionDistinctJSONPathsData & other)
{
data.insert(other.data.begin(), other.data.end());
}
void serialize(WriteBuffer & buf) const
{
writeVarUInt(data.size(), buf);
for (const auto & path : data)
writeStringBinary(path, buf);
}
void deserialize(ReadBuffer & buf)
{
size_t size;
readVarUInt(size, buf);
if (size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, size);
String path;
for (size_t i = 0; i != size; ++i)
{
readStringBinary(path, buf);
data.insert(path);
}
}
void insertResultInto(IColumn & column)
{
/// Insert paths in sorted order for better output.
auto & array_column = assert_cast<ColumnArray &>(column);
auto & string_column = assert_cast<ColumnString &>(array_column.getData());
std::vector<String> sorted_data(data.begin(), data.end());
std::sort(sorted_data.begin(), sorted_data.end());
for (const auto & path : sorted_data)
string_column.insertData(path.data(), path.size());
array_column.getOffsets().push_back(string_column.size());
}
static DataTypePtr getResultType()
{
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
}
};
struct AggregateFunctionDistinctJSONPathsAndTypesData
{
static constexpr auto name = "distinctJSONPathsAndTypes";
std::unordered_map<String, std::unordered_set<String>> data;
void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> & typed_paths_type_names)
{
for (const auto & [path, _] : column.getTypedPaths())
data[path].insert(typed_paths_type_names.at(path));
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
{
if (!dynamic_column->isNullAt(row_num))
data[path].insert(dynamic_column->getTypeNameAt(row_num));
}
/// Iterate over paths in shared data in this row and decode the data types.
const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
const auto & shared_data_offsets = column.getSharedDataOffsets();
const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
for (size_t i = start; i != end; ++i)
{
auto path = shared_data_paths->getDataAt(i).toString();
auto value = shared_data_values->getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
/// We should not have Nulls here but let's check just in case.
chassert(!isNothing(type));
data[path].insert(type->getName());
}
}
void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> & typed_paths_type_names)
{
for (const auto & [path, _] : column.getTypedPaths())
data[path].insert(typed_paths_type_names.at(path));
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
{
/// Add dynamic path only if it has at least one non-null value.
/// getNumberOfDefaultRows for Dynamic column is O(1).
if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
dynamic_column->getAllTypeNamesInto(data[path]);
}
/// Iterate over all paths in shared data and decode the data types.
const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
for (size_t i = 0; i != shared_data_paths->size(); ++i)
{
auto path = shared_data_paths->getDataAt(i).toString();
auto value = shared_data_values->getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
/// We should not have Nulls here but let's check just in case.
chassert(!isNothing(type));
data[path].insert(type->getName());
}
}
void merge(const AggregateFunctionDistinctJSONPathsAndTypesData & other)
{
for (const auto & [path, types] : other.data)
data[path].insert(types.begin(), types.end());
}
void serialize(WriteBuffer & buf) const
{
writeVarUInt(data.size(), buf);
for (const auto & [path, types] : data)
{
writeStringBinary(path, buf);
writeVarUInt(types.size(), buf);
for (const auto & type : types)
writeStringBinary(type, buf);
}
}
void deserialize(ReadBuffer & buf)
{
size_t paths_size, types_size;
readVarUInt(paths_size, buf);
if (paths_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for paths (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, paths_size);
data.reserve(paths_size);
String path, type;
for (size_t i = 0; i != paths_size; ++i)
{
readStringBinary(path, buf);
readVarUInt(types_size, buf);
if (types_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for types (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, types_size);
data[path].reserve(types_size);
for (size_t j = 0; j != types_size; ++j)
{
readStringBinary(type, buf);
data[path].insert(type);
}
}
}
void insertResultInto(IColumn & column)
{
/// Insert sorted paths and types for better output.
auto & array_column = assert_cast<ColumnMap &>(column).getNestedColumn();
auto & tuple_column = assert_cast<ColumnTuple &>(array_column.getData());
auto & key_column = assert_cast<ColumnString &>(tuple_column.getColumn(0));
auto & value_column = assert_cast<ColumnArray &>(tuple_column.getColumn(1));
auto & value_column_data = assert_cast<ColumnString &>(value_column.getData());
std::vector<std::pair<String, std::vector<String>>> sorted_data;
sorted_data.reserve(data.size());
for (const auto & [path, types] : data)
{
std::vector<String> sorted_types(types.begin(), types.end());
std::sort(sorted_types.begin(), sorted_types.end());
sorted_data.emplace_back(path, std::move(sorted_types));
}
std::sort(sorted_data.begin(), sorted_data.end());
for (const auto & [path, types] : sorted_data)
{
key_column.insertData(path.data(), path.size());
for (const auto & type : types)
value_column_data.insertData(type.data(), type.size());
value_column.getOffsets().push_back(value_column_data.size());
}
array_column.getOffsets().push_back(key_column.size());
}
static DataTypePtr getResultType()
{
return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()));
}
};
/// Calculates the list of distinct paths or pairs (path, type) in JSON column.
template <typename Data>
class AggregateFunctionDistinctJSONPathsAndTypes final : public IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>
{
public:
explicit AggregateFunctionDistinctJSONPathsAndTypes(const DataTypes & argument_types_)
: IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>(
argument_types_, {}, Data::getResultType())
{
const auto & typed_paths_types = assert_cast<const DataTypeObject &>(*argument_types_[0]).getTypedPaths();
typed_paths_type_names.reserve(typed_paths_types.size());
for (const auto & [path, type] : typed_paths_types)
typed_paths_type_names[path] = type->getName();
}
String getName() const override { return Data::name; }
bool allocatesMemoryInArena() const override { return false; }
void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{
const auto & object_column = assert_cast<const ColumnObject & >(*columns[0]);
this->data(place).add(object_column, row_num, typed_paths_type_names);
}
void ALWAYS_INLINE addBatchSinglePlace(
size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
const override
{
if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
/// Optimization for case when we add all rows from the column into single place.
/// In this case we can avoid iterating over all rows because we can get all paths
/// and types in JSON column in a more efficient way.
else
this->data(place).addWholeColumn(assert_cast<const ColumnObject & >(*columns[0]), typed_paths_type_names);
}
void addManyDefaults(
AggregateDataPtr __restrict /*place*/,
const IColumn ** /*columns*/,
size_t /*length*/,
Arena * /*arena*/) const override
{
/// Default value for JSON is empty object, so nothing to add.
}
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).merge(this->data(rhs));
}
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
}
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
}
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
this->data(place).insertResultInto(to);
}
private:
std::unordered_map<String, String> typed_paths_type_names;
};
template <typename Data>
AggregateFunctionPtr createAggregateFunctionDistinctJSONPathsAndTypes(
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
if (argument_types.size() != 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Incorrect number of arguments for aggregate function {}. Expected single argument with type JSON, got {} arguments", name, argument_types.size());
if (!isObject(argument_types[0]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type JSON", argument_types[0]->getName(), name);
return std::make_shared<AggregateFunctionDistinctJSONPathsAndTypes<Data>>(argument_types);
}
void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory)
{
factory.registerFunction("distinctJSONPaths", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsData>);
factory.registerFunction("distinctJSONPathsAndTypes", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsAndTypesData>);
}
}
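A hedged usage sketch (hypothetical table t with a JSON column j): SELECT distinctJSONPaths(j) FROM t returns the distinct paths as a sorted Array(String), while distinctJSONPathsAndTypes(j) returns a Map(String, Array(String)) from each path to its sorted type names. Typed paths always report their declared type; dynamic and shared-data paths contribute only from rows where they are present and non-NULL.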

View File

@ -116,15 +116,17 @@ class GroupConcatImpl final
SerializationPtr serialization;
UInt64 limit;
const String delimiter;
const DataTypePtr type;
public:
GroupConcatImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 limit_, const String & delimiter_)
: IAggregateFunctionDataHelper<GroupConcatData<has_limit>, GroupConcatImpl<has_limit>>(
{data_type_}, parameters_, std::make_shared<DataTypeString>())
, serialization(this->argument_types[0]->getDefaultSerialization())
, limit(limit_)
, delimiter(delimiter_)
, type(data_type_)
{
serialization = isFixedString(type) ? std::make_shared<DataTypeString>()->getDefaultSerialization() : this->argument_types[0]->getDefaultSerialization();
}
String getName() const override { return name; }
@ -140,6 +142,13 @@ public:
if (cur_data.data_size != 0)
cur_data.insertChar(delimiter.c_str(), delimiter.size(), arena);
if (isFixedString(type))
{
ColumnWithTypeAndName col = {columns[0]->getPtr(), type, "column"};
const auto & col_str = castColumn(col, std::make_shared<DataTypeString>());
cur_data.insert(col_str.get(), serialization, row_num, arena);
}
else
cur_data.insert(columns[0], serialization, row_num, arena);
}

View File

@ -459,6 +459,8 @@ public:
bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }
constexpr static bool parallelizeMergeWithKey() { return true; }
void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
{
if constexpr (is_parallelize_merge_prepare_needed)

View File

@ -145,6 +145,8 @@ public:
virtual bool isParallelizeMergePrepareNeeded() const { return false; }
constexpr static bool parallelizeMergeWithKey() { return false; }
virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());
@ -169,7 +171,7 @@ public:
/// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
/// then destroy states (on which src places points to).
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;
/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT
@ -499,11 +501,15 @@ public:
static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
}
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
{
for (size_t i = 0; i < size; ++i)
{
if constexpr (Derived::parallelizeMergeWithKey())
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
else
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
}
}

View File

@ -101,6 +101,13 @@ public:
auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
{
/// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
if (other.size() > 40000)
{
if (isSingleLevel())
convertToTwoLevel();
}
if (isSingleLevel() && other.isTwoLevel())
convertToTwoLevel();
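A hedged note on the 40000 threshold above: it reads as a heuristic trade-off. A two-level hash table shards its entries into 256 buckets, so once the incoming set is large the merge can proceed bucket-by-bucket on the thread pool, while small sets stay single-level and avoid the conversion cost.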

View File

@ -89,6 +89,8 @@ void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
void registerAggregateFunctionFlameGraph(AggregateFunctionFactory &);
void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory);
void registerAggregateFunctionLargestTriangleThreeBuckets(AggregateFunctionFactory & factory);
void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory);
void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory);
class AggregateFunctionCombinatorFactory;
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
@ -191,6 +193,8 @@ void registerAggregateFunctions()
registerAggregateFunctionFlameGraph(factory);
registerAggregateFunctionKolmogorovSmirnovTest(factory);
registerAggregateFunctionLargestTriangleThreeBuckets(factory);
registerAggregateFunctionDistinctDynamicTypes(factory);
registerAggregateFunctionDistinctJSONPathsAndTypes(factory);
registerWindowFunctions(factory);
}

View File

@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
DatabasePtr database = table_info.database;
auto query_context = Context::createCopy(context);
query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
/// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
/// database-specific things).
database->createTableRestoredFromBackup(
create_table_query,
context,
query_context,
restore_coordination,
std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
}

View File

@ -176,7 +176,7 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io)
if (TARGET ch_contrib::jemalloc)
target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc)
target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc)
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::jemalloc)
target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc)
endif()
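A hedged note on the PRIVATE-to-PUBLIC switch: in CMake, PUBLIC linkage propagates jemalloc's usage requirements (include directories and the link line) to every target that links clickhouse_common_io, which is presumably needed once jemalloc is referenced from common headers such as Common/Jemalloc.h.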

View File

@ -34,6 +34,7 @@
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTAuthenticationData.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTUseQuery.h>
@ -1895,6 +1896,21 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
/// Temporarily apply query settings to context.
std::optional<Settings> old_settings;
SCOPE_EXIT_SAFE({
try
{
/// We need to park ParallelFormatting threads,
/// because they can use settings from the global context,
/// which can lead to a data race with `setSettings`.
resetOutput();
}
catch (...)
{
if (!have_error)
{
client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
}
if (old_settings)
client_context->setSettings(*old_settings);
});
@ -2111,6 +2127,15 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
// - Other formats (e.g. FORMAT CSV) are arbitrarily more complex and tricky to parse. For example, we may be unable to distinguish if the semicolon
// is part of the data or ends the statement. In this case, we simply assume that the end of the INSERT statement is determined by \n\n (two newlines).
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
// We also consider the INSERT query in EXPLAIN queries (same as normal INSERT queries)
if (!insert_ast)
{
auto * explain_ast = parsed_query->as<ASTExplainQuery>();
if (explain_ast && explain_ast->getExplainedQuery())
{
insert_ast = explain_ast->getExplainedQuery()->as<ASTInsertQuery>();
}
}
const char * query_to_execute_end = this_query_end;
if (insert_ast && insert_ast->data)
{
@ -2689,14 +2714,6 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name)
ReadBufferFromFile in(file_name);
readStringUntilEOF(queries_from_file, in);
if (!getClientConfiguration().has("log_comment"))
{
Settings settings = client_context->getSettingsCopy();
/// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]"
settings.log_comment = fs::absolute(fs::path(file_name));
client_context->setSettings(settings);
}
return executeMultiQuery(queries_from_file);
}

View File

@ -168,7 +168,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
{ return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); };
return getManyImpl(settings, pool_mode, try_get_entry,
/*skip_unavailable_endpoints=*/ std::nullopt,
/*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
/*priority_func=*/ {},
settings.distributed_insert_skip_read_only_replicas);
}

View File

@ -42,7 +42,7 @@ public:
size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);
using Entry = IConnectionPool::Entry;
using PoolWithFailoverBase<IConnectionPool>::isTryResultInvalid;
using PoolWithFailoverBase<IConnectionPool>::getValidTryResult;
/** Allocates connection to work. */
Entry get(const ConnectionTimeouts & timeouts) override;
@ -98,7 +98,7 @@ public:
std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false);
size_t getMaxErrorCup() const { return Base::max_error_cap; }
size_t getMaxErrorCap() const { return Base::max_error_cap; }
void updateSharedError(std::vector<ShuffledPool> & shuffled_pools)
{

View File

@ -327,7 +327,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
ShuffledPool & shuffled_pool = shuffled_pools[index];
LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
shuffled_pool.slowdown_count = 0;
if (shuffled_pool.error_count >= max_tries)

View File

@ -979,6 +979,41 @@ ColumnPtr ColumnDynamic::compress() const
});
}
String ColumnDynamic::getTypeNameAt(size_t row_num) const
{
const auto & variant_col = getVariantColumn();
const size_t discr = variant_col.globalDiscriminatorAt(row_num);
if (discr == ColumnVariant::NULL_DISCRIMINATOR)
return "";
if (discr == getSharedVariantDiscriminator())
{
const auto value = getSharedVariant().getDataAt(variant_col.offsetAt(row_num));
ReadBufferFromMemory buf(value.data, value.size);
return decodeDataType(buf)->getName();
}
return variant_info.variant_names[discr];
}
void ColumnDynamic::getAllTypeNamesInto(std::unordered_set<String> & names) const
{
auto shared_variant_discr = getSharedVariantDiscriminator();
for (size_t i = 0; i != variant_info.variant_names.size(); ++i)
{
if (i != shared_variant_discr && !variant_column_ptr->getVariantByGlobalDiscriminator(i).empty())
names.insert(variant_info.variant_names[i]);
}
const auto & shared_variant = getSharedVariant();
for (size_t i = 0; i != shared_variant.size(); ++i)
{
const auto value = shared_variant.getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
names.insert(decodeDataType(buf)->getName());
}
}
void ColumnDynamic::prepareForSquashing(const Columns & source_columns)
{
if (source_columns.empty())

View File

@ -430,6 +430,9 @@ public:
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) { return getVariantSerialization(variant_type, variant_type->getName()); }
String getTypeNameAt(size_t row_num) const;
void getAllTypeNamesInto(std::unordered_set<String> & names) const;
private:
void createVariantInfo(const DataTypePtr & variant_type);

View File

@ -1,5 +1,3 @@
#include <Common/AsynchronousMetrics.h>
#include <IO/MMappedFileCache.h>
#include <IO/ReadHelpers.h>
#include <IO/UncompressedCache.h>
@ -8,8 +6,10 @@
#include <base/find_symbols.h>
#include <base/getPageSize.h>
#include <sys/resource.h>
#include <Common/AsynchronousMetrics.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/Jemalloc.h>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>
@ -69,10 +69,14 @@ static void openCgroupv2MetricFile(const std::string & filename, std::optional<R
AsynchronousMetrics::AsynchronousMetrics(
unsigned update_period_seconds,
const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
bool update_jemalloc_epoch_,
bool update_rss_)
: update_period(update_period_seconds)
, log(getLogger("AsynchronousMetrics"))
, protocol_server_metrics_func(protocol_server_metrics_func_)
, update_jemalloc_epoch(update_jemalloc_epoch_)
, update_rss(update_rss_)
{
#if defined(OS_LINUX)
openFileIfExists("/proc/cpuinfo", cpuinfo);
@ -411,9 +415,7 @@ Value saveJemallocMetricImpl(
const std::string & jemalloc_full_name,
const std::string & clickhouse_full_name)
{
Value value{};
size_t size = sizeof(value);
mallctl(jemalloc_full_name.c_str(), &value, &size, nullptr, 0);
auto value = getJemallocValue<Value>(jemalloc_full_name.c_str());
values[clickhouse_full_name] = AsynchronousMetricValue(value, "An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html");
return value;
}
@ -768,8 +770,11 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
// 'epoch' is a special mallctl -- it updates the statistics. Without it, all
// the following calls will return stale values. It increments and returns
// the current epoch number, which might be useful to log as a sanity check.
auto epoch = updateJemallocEpoch();
new_values["jemalloc.epoch"] = { epoch, "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other `jemalloc` metrics." };
auto epoch = update_jemalloc_epoch ? updateJemallocEpoch() : getJemallocValue<uint64_t>("epoch");
new_values["jemalloc.epoch"]
= {epoch,
"An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other "
"`jemalloc` metrics."};
// Collect the statistics themselves.
saveJemallocMetric<size_t>(new_values, "allocated");
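// A minimal sketch of the jemalloc idiom these helpers wrap (assumes only
// jemalloc's public mallctl API; variable names are illustrative):
//
//     uint64_t epoch = 1;
//     size_t sz = sizeof(epoch);
//     mallctl("epoch", &epoch, &sz, &epoch, sz);               // refresh cached statistics
//     size_t allocated = 0;
//     sz = sizeof(allocated);
//     mallctl("stats.allocated", &allocated, &sz, nullptr, 0); // read a fresh value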
@ -782,10 +787,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
saveJemallocMetric<size_t>(new_values, "background_thread.num_threads");
saveJemallocMetric<uint64_t>(new_values, "background_thread.num_runs");
saveJemallocMetric<uint64_t>(new_values, "background_thread.run_intervals");
saveJemallocProf<size_t>(new_values, "active");
saveJemallocProf<bool>(new_values, "active");
saveAllArenasMetric<size_t>(new_values, "pactive");
[[maybe_unused]] size_t je_malloc_pdirty = saveAllArenasMetric<size_t>(new_values, "pdirty");
[[maybe_unused]] size_t je_malloc_pmuzzy = saveAllArenasMetric<size_t>(new_values, "pmuzzy");
saveAllArenasMetric<size_t>(new_values, "pdirty");
saveAllArenasMetric<size_t>(new_values, "pmuzzy");
saveAllArenasMetric<size_t>(new_values, "dirty_purged");
saveAllArenasMetric<size_t>(new_values, "muzzy_purged");
#endif
@ -814,41 +819,8 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
" It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call."
" This metric exists only for completeness reasons. I recommend to use the `MemoryResident` metric for monitoring."};
/// We must update the value of total_memory_tracker periodically.
/// Otherwise it might be calculated incorrectly - it can include a "drift" of memory amount.
/// See https://github.com/ClickHouse/ClickHouse/issues/10293
{
Int64 amount = total_memory_tracker.get();
Int64 peak = total_memory_tracker.getPeak();
Int64 rss = data.resident;
Int64 free_memory_in_allocator_arenas = 0;
#if USE_JEMALLOC
/// According to jemalloc man, pdirty is:
///
/// Number of pages within unused extents that are potentially
/// dirty, and for which madvise() or similar has not been called.
///
/// So they will be subtracted from RSS to make accounting more
/// accurate, since those pages are not really RSS but a memory
/// that can be used at anytime via jemalloc.
free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize();
#endif
Int64 difference = rss - amount;
/// Log only if difference is high. This is for convenience. The threshold is arbitrary.
if (difference >= 1048576 || difference <= -1048576)
LOG_TRACE(log,
"MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}",
ReadableSize(amount),
ReadableSize(peak),
ReadableSize(free_memory_in_allocator_arenas),
ReadableSize(rss),
ReadableSize(difference));
MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
}
if (update_rss)
MemoryTracker::updateRSS(data.resident);
}
{

View File

@ -1,15 +1,14 @@
#pragma once
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/MemoryStatisticsOS.h>
#include <Common/ThreadPool.h>
#include <Common/Stopwatch.h>
#include <IO/ReadBufferFromFile.h>
#include <condition_variable>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <optional>
#include <unordered_map>
@ -69,7 +68,9 @@ public:
AsynchronousMetrics(
unsigned update_period_seconds,
const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
bool update_jemalloc_epoch_,
bool update_rss_);
virtual ~AsynchronousMetrics();
@ -112,6 +113,9 @@ private:
MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex);
#endif
[[maybe_unused]] const bool update_jemalloc_epoch;
[[maybe_unused]] const bool update_rss;
#if defined(OS_LINUX)
std::optional<ReadBufferFromFilePRead> meminfo TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> loadavg TSA_GUARDED_BY(data_mutex);

View File

@ -14,239 +14,21 @@
#include <fmt/ranges.h>
#include <cstdint>
#include <filesystem>
#include <memory>
#include <optional>
#include "config.h"
#if USE_JEMALLOC
# include <jemalloc/jemalloc.h>
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#endif
using namespace DB;
namespace fs = std::filesystem;
namespace DB
{
namespace ErrorCodes
{
extern const int FILE_DOESNT_EXIST;
extern const int INCORRECT_DATA;
}
}
namespace
{
/// Format is
/// kernel 5
/// rss 15
/// [...]
using Metrics = std::map<std::string, uint64_t>;
Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
{
Metrics metrics;
while (!buf.eof())
{
std::string current_key;
readStringUntilWhitespace(current_key, buf);
assertChar(' ', buf);
uint64_t value = 0;
readIntText(value, buf);
assertChar('\n', buf);
auto [_, inserted] = metrics.emplace(std::move(current_key), value);
chassert(inserted, "Duplicate keys in stat file");
}
return metrics;
}
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
{
const auto all_metrics = readAllMetricsFromStatFile(buf);
if (const auto it = all_metrics.find(key); it != all_metrics.end())
return it->second;
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
}
struct CgroupsV1Reader : ICgroupsReader
{
explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
buf.rewind();
return readMetricFromStatFile(buf, "rss");
}
std::string dumpAllStats() override
{
std::lock_guard lock(mutex);
buf.rewind();
return fmt::format("{}", readAllMetricsFromStatFile(buf));
}
private:
std::mutex mutex;
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
};
struct CgroupsV2Reader : ICgroupsReader
{
explicit CgroupsV2Reader(const fs::path & stat_file_dir)
: current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
{
}
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
current_buf.rewind();
stat_buf.rewind();
int64_t mem_usage = 0;
/// memory.current contains a single number
/// the reason why we subtract it described here: https://github.com/ClickHouse/ClickHouse/issues/64652#issuecomment-2149630667
readIntText(mem_usage, current_buf);
mem_usage -= readMetricFromStatFile(stat_buf, "inactive_file");
chassert(mem_usage >= 0, "Negative memory usage");
return mem_usage;
}
std::string dumpAllStats() override
{
std::lock_guard lock(mutex);
stat_buf.rewind();
return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
}
private:
std::mutex mutex;
ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
};
/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
/// containing cgroup (or more precisely: the only process with significant memory consumption).
/// If this is not the case, then other processes' memory consumption may affect the internal
/// memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
/// decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
/// systems existed only for a short transition period.
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!fs::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}
std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsPath()
{
auto v2_path = getCgroupsV2PathContainingFile("memory.current");
if (v2_path.has_value())
return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2};
auto v1_path = getCgroupsV1Path();
if (v1_path.has_value())
return {*v1_path, CgroupsMemoryUsageObserver::CgroupsVersion::V1};
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}
}
namespace DB
{
CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_)
: log(getLogger("CgroupsMemoryUsageObserver")), wait_time(wait_time_)
{
const auto [cgroup_path, version] = getCgroupsPath();
cgroup_reader = createCgroupsReader(version, cgroup_path);
LOG_INFO(
log,
"Will read the current memory usage from '{}' (cgroups version: {}), wait time is {} sec",
cgroup_path,
(version == CgroupsVersion::V1) ? "v1" : "v2",
wait_time.count());
}
{}
CgroupsMemoryUsageObserver::~CgroupsMemoryUsageObserver()
{
stopThread();
}
void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_)
{
std::lock_guard<std::mutex> limit_lock(limit_mutex);
if (hard_limit_ == hard_limit && soft_limit_ == soft_limit)
return;
hard_limit = hard_limit_;
soft_limit = soft_limit_;
on_hard_limit = [this, hard_limit_](bool up)
{
if (up)
{
LOG_WARNING(log, "Exceeded hard memory limit ({})", ReadableSize(hard_limit_));
/// Update current usage in memory tracker. Also reset free_memory_in_allocator_arenas to zero though we don't know if they are
/// really zero. Trying to avoid OOM ...
MemoryTracker::setRSS(hard_limit_, 0);
}
else
{
LOG_INFO(log, "Dropped below hard memory limit ({})", ReadableSize(hard_limit_));
}
};
on_soft_limit = [this, soft_limit_](bool up)
{
if (up)
{
LOG_WARNING(log, "Exceeded soft memory limit ({})", ReadableSize(soft_limit_));
# if USE_JEMALLOC
LOG_INFO(log, "Purging jemalloc arenas");
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
# endif
/// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
LOG_TRACE(
log,
"Read current memory usage {} bytes ({}) from cgroups, full available stats: {}",
memory_usage,
ReadableSize(memory_usage),
cgroup_reader->dumpAllStats());
MemoryTracker::setRSS(memory_usage, 0);
LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
}
else
{
LOG_INFO(log, "Dropped below soft memory limit ({})", ReadableSize(soft_limit_));
}
};
LOG_INFO(log, "Set new limits, soft limit: {}, hard limit: {}", ReadableSize(soft_limit_), ReadableSize(hard_limit_));
}
void CgroupsMemoryUsageObserver::setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_)
{
std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);
@ -300,35 +82,6 @@ void CgroupsMemoryUsageObserver::runThread()
std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);
on_memory_amount_available_changed();
}
std::lock_guard<std::mutex> limit_lock(limit_mutex);
if (soft_limit > 0 && hard_limit > 0)
{
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
if (memory_usage > hard_limit)
{
if (last_memory_usage <= hard_limit)
on_hard_limit(true);
}
else
{
if (last_memory_usage > hard_limit)
on_hard_limit(false);
}
if (memory_usage > soft_limit)
{
if (last_memory_usage <= soft_limit)
on_soft_limit(true);
}
else
{
if (last_memory_usage > soft_limit)
on_soft_limit(false);
}
last_memory_usage = memory_usage;
}
}
catch (...)
{
@ -337,13 +90,6 @@ void CgroupsMemoryUsageObserver::runThread()
}
}
std::unique_ptr<ICgroupsReader> createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path)
{
if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
return std::make_unique<CgroupsV2Reader>(cgroup_path);
else
return std::make_unique<CgroupsV1Reader>(cgroup_path);
}
}
#endif

View File

@ -3,30 +3,12 @@
#include <Common/ThreadPool.h>
#include <chrono>
#include <memory>
#include <mutex>
namespace DB
{
struct ICgroupsReader
{
virtual ~ICgroupsReader() = default;
virtual uint64_t readMemoryUsage() = 0;
virtual std::string dumpAllStats() = 0;
};
/// Does two things:
/// 1. Periodically reads the memory usage of the process from Linux cgroups.
/// You can specify soft or hard memory limits:
/// - When the soft memory limit is hit, drop jemalloc cache.
/// - When the hard memory limit is hit, update MemoryTracking metric to throw memory exceptions faster.
/// The goal of this is to avoid the process hitting the maximum allowed memory limit, at which there is a good
/// chance that the Linux OOM killer terminates it. All of this is done because internal memory tracking in
/// ClickHouse can unfortunately under-estimate the actually used memory.
/// 2. Periodically reads the maximum memory available to the process (which can change due to cgroups settings).
/// Periodically reads the maximum memory available to the process (which can change due to cgroups settings).
/// You can specify a callback to react to changes. The callback typically reloads the configuration, i.e. Server
/// or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit'
/// (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.).
@ -37,19 +19,11 @@ struct ICgroupsReader
class CgroupsMemoryUsageObserver
{
public:
using OnMemoryLimitFn = std::function<void(bool)>;
using OnMemoryAmountAvailableChangedFn = std::function<void()>;
enum class CgroupsVersion : uint8_t
{
V1,
V2
};
explicit CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_);
~CgroupsMemoryUsageObserver();
void setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_);
void setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_);
void startThread();
@ -60,32 +34,22 @@ private:
const std::chrono::seconds wait_time;
std::mutex limit_mutex;
size_t hard_limit TSA_GUARDED_BY(limit_mutex) = 0;
size_t soft_limit TSA_GUARDED_BY(limit_mutex) = 0;
OnMemoryLimitFn on_hard_limit TSA_GUARDED_BY(limit_mutex);
OnMemoryLimitFn on_soft_limit TSA_GUARDED_BY(limit_mutex);
std::mutex memory_amount_available_changed_mutex;
OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed TSA_GUARDED_BY(memory_amount_available_changed_mutex);
uint64_t last_memory_usage = 0; /// how much memory does the process use
uint64_t last_available_memory_amount; /// how much memory can the process use
void stopThread();
void runThread();
std::unique_ptr<ICgroupsReader> cgroup_reader;
std::mutex thread_mutex;
std::condition_variable cond;
ThreadFromGlobalPool thread;
bool quit = false;
};
std::unique_ptr<ICgroupsReader>
createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path);
#else
class CgroupsMemoryUsageObserver
{

View File

@ -75,9 +75,9 @@
M(GlobalThread, "Number of threads in global thread pool.") \
M(GlobalThreadActive, "Number of threads in global thread pool running a task.") \
M(GlobalThreadScheduled, "Number of queued or active jobs in global thread pool.") \
M(LocalThread, "Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Number of queued or active jobs in local thread pools.") \
M(LocalThread, "Obsolete. Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Obsolete. Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Obsolete. Number of queued or active jobs in local thread pools.") \
M(MergeTreeDataSelectExecutorThreads, "Number of threads in the MergeTreeDataSelectExecutor thread pool.") \
M(MergeTreeDataSelectExecutorThreadsActive, "Number of threads in the MergeTreeDataSelectExecutor thread pool running a task.") \
M(MergeTreeDataSelectExecutorThreadsScheduled, "Number of queued or active jobs in the MergeTreeDataSelectExecutor thread pool.") \
@ -292,6 +292,9 @@
M(DistrCacheWriteRequests, "Number of executed Write requests to Distributed Cache") \
M(DistrCacheServerConnections, "Number of open connections to ClickHouse server from Distributed Cache") \
\
    M(SchedulerIOReadScheduled, "Number of IO reads currently being scheduled") \
    M(SchedulerIOWriteScheduled, "Number of IO writes currently being scheduled") \
\
M(StorageConnectionsStored, "Total count of sessions stored in the session pool for storages") \
M(StorageConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for storages") \
\

View File

@ -113,6 +113,56 @@ std::string_view CurrentThread::getQueryId()
return current_thread->getQueryId();
}
void CurrentThread::attachReadResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link = link;
}
void CurrentThread::detachReadResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link.reset();
}
ResourceLink CurrentThread::getReadResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->read_resource_link;
}
void CurrentThread::attachWriteResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link = link;
}
void CurrentThread::detachWriteResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link.reset();
}
ResourceLink CurrentThread::getWriteResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->write_resource_link;
}
MemoryTracker * CurrentThread::getUserMemoryTracker()
{
if (unlikely(!current_thread))

View File

@ -2,6 +2,7 @@
#include <Interpreters/Context_fwd.h>
#include <Common/ThreadStatus.h>
#include <Common/Scheduler/ResourceLink.h>
#include <memory>
#include <string>
@ -23,7 +24,6 @@ class QueryStatus;
struct Progress;
class InternalTextLogsQueue;
/** Collection of static methods to work with thread-local objects.
  * Allows attaching and detaching a query/process (thread group) to/from a thread
  * (to calculate query-related metrics and to obtain query-related data from a thread).
@ -92,6 +92,14 @@ public:
static std::string_view getQueryId();
// For IO Scheduling
static void attachReadResource(ResourceLink link);
static void detachReadResource();
static ResourceLink getReadResourceLink();
static void attachWriteResource(ResourceLink link);
static void detachWriteResource();
static ResourceLink getWriteResourceLink();
/// Initializes query with current thread as master thread in constructor, and detaches it in destructor
struct QueryScope : private boost::noncopyable
{
@ -102,6 +110,39 @@ public:
void logPeakMemoryUsage();
bool log_peak_memory_usage_in_destructor = true;
};
/// Scoped attach/detach of IO resource links
struct IOScope : private boost::noncopyable
{
explicit IOScope(ResourceLink read_resource_link, ResourceLink write_resource_link)
{
if (read_resource_link)
{
attachReadResource(read_resource_link);
read_attached = true;
}
if (write_resource_link)
{
attachWriteResource(write_resource_link);
write_attached = true;
}
}
explicit IOScope(const IOSchedulingSettings & settings)
: IOScope(settings.read_resource_link, settings.write_resource_link)
{}
~IOScope()
{
if (read_attached)
detachReadResource();
if (write_attached)
detachWriteResource();
}
bool read_attached = false;
bool write_attached = false;
};
};
}
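/// Usage sketch for IOScope (hedged: `settings` is a hypothetical IOSchedulingSettings instance
/// carrying per-query resource links):
///
///     {
///         DB::CurrentThread::IOScope io_scope(settings);
///         /// reads and writes issued on this thread are now attached to the scheduler links
///     } /// destructor detaches the links automatically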

View File

@ -2,6 +2,7 @@
#include <Common/HostResolvePool.h>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
#include <Common/CurrentMetrics.h>
#include <Common/logger_useful.h>
#include <Common/Exception.h>
@ -9,6 +10,7 @@
#include <Common/ProxyConfiguration.h>
#include <Common/MemoryTrackerSwitcher.h>
#include <Common/SipHash.h>
#include <Common/Scheduler/ResourceGuard.h>
#include <Common/proxyConfigurationToPocoProxyConfig.h>
#include <Poco/Net/HTTPChunkedStream.h>
@ -236,6 +238,59 @@ public:
};
// Session data hooks implementation for integration with the resource scheduler.
// Hooks are created for every request-response pair and are registered/unregistered in the HTTP session.
//  * `atStart()` sends a resource request to the scheduler every time the HTTP session is going to send or receive
//    data to/from the socket, and waits for the scheduler's confirmation. This way the scheduler can
//    throttle and/or schedule socket data streams.
//  * The `atFinish()` hook is called on a successful socket read/write operation.
//    It informs the scheduler that the operation is complete, which allows the scheduler to control the total
//    amount of in-flight bytes and/or operations.
//  * The `atFail()` hook is called on a failed socket operation. Its purpose is to correct the amount of bytes
//    passed through the scheduler queue, to ensure fair bandwidth allocation even in the presence of errors.
struct ResourceGuardSessionDataHooks : public Poco::Net::IHTTPSessionDataHooks
{
ResourceGuardSessionDataHooks(ResourceLink link_, const ResourceGuard::Metrics * metrics, LoggerPtr log_, const String & method, const String & uri)
: link(link_)
, log(log_)
, http_request(method + " " + uri)
{
request.metrics = metrics;
chassert(link);
}
~ResourceGuardSessionDataHooks() override
{
request.assertFinished(); // Never destruct with an active request
}
void atStart(int bytes) override
{
Stopwatch timer;
request.enqueue(bytes, link);
request.wait();
timer.stop();
if (timer.elapsedMilliseconds() >= 5000)
LOG_INFO(log, "Resource request took too long to finish: {} ms for {}", timer.elapsedMilliseconds(), http_request);
}
void atFinish(int bytes) override
{
request.finish(bytes, link);
}
void atFail() override
{
request.finish(0, link);
}
ResourceLink link;
ResourceGuard::Request request;
LoggerPtr log;
String http_request;
};
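// Illustrative call sequence for a single socket write, assuming `hooks` points to the
// ResourceGuardSessionDataHooks above and `n` bytes are about to be sent (a sketch only;
// the real calls are made by Poco's HTTP session internals, and `socket` is hypothetical):
//
//     hooks->atStart(n);                        // enqueue resource request, wait for admission
//     ssize_t sent = socket.sendBytes(buf, n);
//     if (sent >= 0)
//         hooks->atFinish(sent);                // report completed bytes to the scheduler
//     else
//         hooks->atFail();                      // roll back scheduler accounting on error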
// EndpointConnectionPool manages connections to the endpoint.
// Features:
// - it uses HostResolver for address selection. See Common/HostResolver.h for more info.
@ -246,8 +301,6 @@ public:
// - `Session::reconnect()` uses the pool as well
// - comprehensive sensors
// - a session is reused according to its inner state, automatically
template <class Session>
class EndpointConnectionPool : public std::enable_shared_from_this<EndpointConnectionPool<Session>>, public IExtendedPool
{
@ -337,6 +390,13 @@ private:
std::ostream & sendRequest(Poco::Net::HTTPRequest & request) override
{
auto idle = idleTime();
// Set data hooks for IO scheduling
if (ResourceLink link = CurrentThread::getReadResourceLink())
Session::setReceiveDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIORead(), log, request.getMethod(), request.getURI()));
if (ResourceLink link = CurrentThread::getWriteResourceLink())
Session::setSendDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIOWrite(), log, request.getMethod(), request.getURI()));
std::ostream & result = Session::sendRequest(request);
result.exceptions(std::ios::badbit);
@ -393,6 +453,8 @@ private:
}
}
response_stream = nullptr;
Session::setSendDataHooks();
Session::setReceiveDataHooks();
group->atConnectionDestroy();

View File

@ -5,7 +5,6 @@
#include <Common/Exception.h>
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
@ -26,7 +25,6 @@ namespace ErrorCodes
void purgeJemallocArenas()
{
LOG_TRACE(getLogger("SystemJemalloc"), "Purging unused memory");
Stopwatch watch;
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
@ -46,20 +44,6 @@ void checkJemallocProfilingEnabled()
"set: MALLOC_CONF=background_thread:true,prof:true");
}
template <typename T>
void setJemallocValue(const char * name, T value)
{
T old_value;
size_t old_value_size = sizeof(T);
if (mallctl(name, &old_value, &old_value_size, reinterpret_cast<void*>(&value), sizeof(T)))
{
LOG_WARNING(getLogger("Jemalloc"), "mallctl for {} failed", name);
return;
}
LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value);
}
void setJemallocProfileActive(bool value)
{
checkJemallocProfilingEnabled();

View File

@ -5,6 +5,8 @@
#if USE_JEMALLOC
#include <string>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>
namespace DB
{
@ -21,6 +23,59 @@ void setJemallocBackgroundThreads(bool enabled);
void setJemallocMaxBackgroundThreads(size_t max_threads);
template <typename T>
void setJemallocValue(const char * name, T value)
{
T old_value;
size_t old_value_size = sizeof(T);
mallctl(name, &old_value, &old_value_size, reinterpret_cast<void*>(&value), sizeof(T));
LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value);
}
template <typename T>
T getJemallocValue(const char * name)
{
T value;
size_t value_size = sizeof(T);
mallctl(name, &value, &value_size, nullptr, 0);
return value;
}
/// Each mallctl call involves a string name lookup, which can be expensive.
/// This can be avoided by translating the name to a "Management Information Base" (MIB) once
/// and using it in subsequent mallctlbymib calls.
template <typename T>
struct JemallocMibCache
{
explicit JemallocMibCache(const char * name)
{
mallctlnametomib(name, mib, &mib_length);
}
void setValue(T value)
{
mallctlbymib(mib, mib_length, nullptr, nullptr, reinterpret_cast<void*>(&value), sizeof(T));
}
T getValue()
{
T value;
size_t value_size = sizeof(T);
mallctlbymib(mib, mib_length, &value, &value_size, nullptr, 0);
return value;
}
void run()
{
mallctlbymib(mib, mib_length, nullptr, nullptr, nullptr, 0);
}
private:
static constexpr size_t max_mib_length = 4;
size_t mib[max_mib_length];
size_t mib_length = max_mib_length;
};
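/// Usage sketch (mirrors how MemoryWorker uses this cache; bumping "epoch" makes jemalloc
/// refresh its cached statistics before they are read):
///
///     JemallocMibCache<uint64_t> epoch_mib("epoch");
///     JemallocMibCache<size_t> resident_mib("stats.resident");
///     epoch_mib.setValue(0);                     /// refresh stats
///     size_t resident = resident_mib.getValue(); /// no string lookup on this hot path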
}
#endif

View File

@ -20,13 +20,9 @@
#if USE_JEMALLOC
# include <jemalloc/jemalloc.h>
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#endif
#include <atomic>
#include <cmath>
#include <random>
#include <cstdlib>
#include <string>
@ -115,8 +111,6 @@ void AllocationTrace::onFreeImpl(void * ptr, size_t size) const
namespace ProfileEvents
{
extern const Event QueryMemoryLimitExceeded;
extern const Event MemoryAllocatorPurge;
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
}
using namespace std::chrono_literals;
@ -126,15 +120,13 @@ static constexpr size_t log_peak_memory_usage_every = 1ULL << 30;
MemoryTracker total_memory_tracker(nullptr, VariableContext::Global);
MemoryTracker background_memory_tracker(&total_memory_tracker, VariableContext::User, false);
std::atomic<Int64> MemoryTracker::free_memory_in_allocator_arenas;
MemoryTracker::MemoryTracker(VariableContext level_) : parent(&total_memory_tracker), level(level_) {}
MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_) : parent(parent_), level(level_) {}
MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_, bool log_peak_memory_usage_in_destructor_)
: parent(parent_)
, log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_)
, level(level_)
{}
: parent(parent_), log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_), level(level_)
{
}
MemoryTracker::~MemoryTracker()
{
@ -204,10 +196,14 @@ void MemoryTracker::debugLogBigAllocationWithoutCheck(Int64 size [[maybe_unused]
return;
MemoryTrackerBlockerInThread blocker(VariableContext::Global);
LOG_TEST(getLogger("MemoryTracker"), "Too big allocation ({} bytes) without checking memory limits, "
"it may lead to OOM. Stack trace: {}", size, StackTrace().toString());
LOG_TEST(
getLogger("MemoryTracker"),
"Too big allocation ({} bytes) without checking memory limits, "
"it may lead to OOM. Stack trace: {}",
size,
StackTrace().toString());
#else
return; /// Avoid trash logging in release builds
/// Avoid trash logging in release builds
#endif
}
@ -228,6 +224,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
{
/// For global memory tracker always update memory usage.
amount.fetch_add(size, std::memory_order_relaxed);
rss.fetch_add(size, std::memory_order_relaxed);
auto metric_loaded = metric.load(std::memory_order_relaxed);
if (metric_loaded != CurrentMetrics::end())
@ -249,6 +246,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
* So, we allow over-allocations.
*/
Int64 will_be = size ? size + amount.fetch_add(size, std::memory_order_relaxed) : amount.load(std::memory_order_relaxed);
Int64 will_be_rss = size ? size + rss.fetch_add(size, std::memory_order_relaxed) : rss.load(std::memory_order_relaxed);
auto metric_loaded = metric.load(std::memory_order_relaxed);
if (metric_loaded != CurrentMetrics::end() && size)
@ -275,6 +273,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
{
/// Revert
amount.fetch_sub(size, std::memory_order_relaxed);
rss.fetch_sub(size, std::memory_order_relaxed);
/// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
@ -297,33 +296,8 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
}
}
Int64 limit_to_check = current_hard_limit;
#if USE_JEMALLOC
if (level == VariableContext::Global && allow_use_jemalloc_memory.load(std::memory_order_relaxed))
{
/// Jemalloc arenas may keep some extra memory.
/// This memory was subtracted from RSS to decrease memory drift.
/// In case memory is close to the limit, try to purge the arenas.
/// This is needed to avoid OOM, because some allocations are done directly with mmap.
Int64 current_free_memory_in_allocator_arenas = free_memory_in_allocator_arenas.load(std::memory_order_relaxed);
if (current_free_memory_in_allocator_arenas > 0 && current_hard_limit && current_free_memory_in_allocator_arenas + will_be > current_hard_limit)
{
if (free_memory_in_allocator_arenas.exchange(-current_free_memory_in_allocator_arenas) > 0)
{
Stopwatch watch;
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds());
}
}
limit_to_check += abs(current_free_memory_in_allocator_arenas);
}
#endif
if (unlikely(current_hard_limit && will_be > limit_to_check))
if (unlikely(
current_hard_limit && (will_be > current_hard_limit || (level == VariableContext::Global && will_be_rss > current_hard_limit))))
{
if (memoryTrackerCanThrow(level, false) && throw_if_memory_exceeded)
{
@ -335,6 +309,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
{
/// Revert
amount.fetch_sub(size, std::memory_order_relaxed);
rss.fetch_sub(size, std::memory_order_relaxed);
/// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
@ -343,12 +318,13 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
throw DB::Exception(
DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED,
"Memory limit{}{} exceeded: "
"would use {} (attempt to allocate chunk of {} bytes), maximum: {}."
"would use {} (attempt to allocate chunk of {} bytes), current RSS {}, maximum: {}."
"{}{}",
description ? " " : "",
description ? description : "",
formatReadableSizeWithBinarySuffix(will_be),
size,
formatReadableSizeWithBinarySuffix(rss.load(std::memory_order_relaxed)),
formatReadableSizeWithBinarySuffix(current_hard_limit),
overcommit_result == OvercommitResult::NONE ? "" : " OvercommitTracker decision: ",
toDescription(overcommit_result));
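/// With the RSS term added, the resulting message looks roughly like (illustrative values only):
///   "Memory limit (total) exceeded: would use 10.25 GiB (attempt to allocate chunk of 1048576 bytes),
///    current RSS 10.10 GiB, maximum: 10.00 GiB."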
@ -442,6 +418,7 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability)
{
/// For global memory tracker always update memory usage.
amount.fetch_sub(size, std::memory_order_relaxed);
rss.fetch_sub(size, std::memory_order_relaxed);
auto metric_loaded = metric.load(std::memory_order_relaxed);
if (metric_loaded != CurrentMetrics::end())
CurrentMetrics::sub(metric_loaded, size);
@ -455,7 +432,12 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability)
}
Int64 accounted_size = size;
if (level == VariableContext::Thread || level == VariableContext::Global)
if (level == VariableContext::Global)
{
amount.fetch_sub(accounted_size, std::memory_order_relaxed);
rss.fetch_sub(accounted_size, std::memory_order_relaxed);
}
else if (level == VariableContext::Thread)
{
/// Could become negative if memory allocated in this thread is freed in another one
amount.fetch_sub(accounted_size, std::memory_order_relaxed);
@ -529,21 +511,29 @@ void MemoryTracker::reset()
}
void MemoryTracker::setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_)
void MemoryTracker::updateRSS(Int64 rss_)
{
Int64 new_amount = rss_;
total_memory_tracker.rss.store(rss_, std::memory_order_relaxed);
}
void MemoryTracker::updateAllocated(Int64 allocated_)
{
Int64 new_amount = allocated_;
LOG_INFO(
getLogger("MemoryTracker"),
"Correcting the value of global memory tracker from {} to {}",
ReadableSize(total_memory_tracker.amount.load(std::memory_order_relaxed)),
ReadableSize(allocated_));
total_memory_tracker.amount.store(new_amount, std::memory_order_relaxed);
free_memory_in_allocator_arenas.store(free_memory_in_allocator_arenas_, std::memory_order_relaxed);
auto metric_loaded = total_memory_tracker.metric.load(std::memory_order_relaxed);
if (metric_loaded != CurrentMetrics::end())
CurrentMetrics::set(metric_loaded, new_amount);
bool log_memory_usage = true;
total_memory_tracker.updatePeak(rss_, log_memory_usage);
total_memory_tracker.updatePeak(new_amount, log_memory_usage);
}
void MemoryTracker::setSoftLimit(Int64 value)
{
soft_limit.store(value, std::memory_order_relaxed);

View File

@ -2,7 +2,6 @@
#include <atomic>
#include <chrono>
#include <optional>
#include <base/types.h>
#include <Common/CurrentMetrics.h>
#include <Common/VariableContext.h>
@ -57,9 +56,8 @@ private:
std::atomic<Int64> soft_limit {0};
std::atomic<Int64> hard_limit {0};
std::atomic<Int64> profiler_limit {0};
std::atomic_bool allow_use_jemalloc_memory {true};
static std::atomic<Int64> free_memory_in_allocator_arenas;
std::atomic<Int64> rss{0};
Int64 profiler_step = 0;
@ -122,6 +120,11 @@ public:
return amount.load(std::memory_order_relaxed);
}
Int64 getRSS() const
{
return rss.load(std::memory_order_relaxed);
}
// Merges and mutations may pass memory ownership to other threads thus in the end of execution
// MemoryTracker for background task may have a non-zero counter.
// This method is intended to fix the counter inside of background_memory_tracker.
@ -154,14 +157,6 @@ public:
{
return soft_limit.load(std::memory_order_relaxed);
}
void setAllowUseJemallocMemory(bool value)
{
allow_use_jemalloc_memory.store(value, std::memory_order_relaxed);
}
bool getAllowUseJemallocMmemory() const
{
return allow_use_jemalloc_memory.load(std::memory_order_relaxed);
}
/** Set limit if it was not set.
* Otherwise, set limit to new value, if new value is greater than previous limit.
@ -249,10 +244,9 @@ public:
/// Reset the accumulated data.
void reset();
/// Reset current counter to an RSS value.
/// Jemalloc may have pre-allocated arenas; they are accounted in RSS.
/// We can free these arenas in case of an exception to avoid OOM.
static void setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_);
/// Update values based on external information (e.g. jemalloc's stats)
static void updateRSS(Int64 rss_);
static void updateAllocated(Int64 allocated_);
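/// E.g. (following MemoryWorker's usage; `cgroups_reader` and `allocated_mib` live there):
///     MemoryTracker::updateRSS(cgroups_reader->readMemoryUsage());
///     MemoryTracker::updateAllocated(allocated_mib.getValue());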
/// Prints info about peak memory consumption into log.
void logPeakMemoryUsage();

333
src/Common/MemoryWorker.cpp Normal file
View File

@ -0,0 +1,333 @@
#include <Common/MemoryWorker.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <base/cgroupsv2.h>
#include <Common/Jemalloc.h>
#include <Common/MemoryTracker.h>
#include <Common/ProfileEvents.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>
#include <fmt/ranges.h>
#include <filesystem>
#include <optional>
namespace fs = std::filesystem;
namespace ProfileEvents
{
extern const Event MemoryAllocatorPurge;
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
extern const Event MemoryWorkerRun;
extern const Event MemoryWorkerRunElapsedMicroseconds;
}
namespace DB
{
namespace ErrorCodes
{
extern const int FILE_DOESNT_EXIST;
extern const int LOGICAL_ERROR;
}
#if defined(OS_LINUX)
namespace
{
using Metrics = std::map<std::string, uint64_t>;
/// Format is
/// kernel 5
/// rss 15
/// [...]
Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
{
Metrics metrics;
while (!buf.eof())
{
std::string current_key;
readStringUntilWhitespace(current_key, buf);
assertChar(' ', buf);
uint64_t value = 0;
readIntText(value, buf);
assertChar('\n', buf);
auto [_, inserted] = metrics.emplace(std::move(current_key), value);
chassert(inserted, "Duplicate keys in stat file");
}
return metrics;
}
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, std::string_view key)
{
while (!buf.eof())
{
std::string current_key;
readStringUntilWhitespace(current_key, buf);
if (current_key != key)
{
std::string dummy;
readStringUntilNewlineInto(dummy, buf);
buf.ignore();
continue;
}
assertChar(' ', buf);
uint64_t value = 0;
readIntText(value, buf);
return value;
}
LOG_ERROR(getLogger("CgroupsReader"), "Cannot find '{}' in '{}'", key, buf.getFileName());
return 0;
}
struct CgroupsV1Reader : ICgroupsReader
{
explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
buf.rewind();
return readMetricFromStatFile(buf, "rss");
}
std::string dumpAllStats() override
{
std::lock_guard lock(mutex);
buf.rewind();
return fmt::format("{}", readAllMetricsFromStatFile(buf));
}
private:
std::mutex mutex;
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
};
struct CgroupsV2Reader : ICgroupsReader
{
explicit CgroupsV2Reader(const fs::path & stat_file_dir) : stat_buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
stat_buf.rewind();
return readMetricFromStatFile(stat_buf, "anon");
}
std::string dumpAllStats() override
{
std::lock_guard lock(mutex);
stat_buf.rewind();
return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
}
private:
std::mutex mutex;
ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
};
/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
/// containing cgroup (or more precisely: the only process with significant memory consumption).
///   If this is not the case, then other processes' memory consumption may affect the internal
///   memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
///   decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
/// systems existed only for a short transition period.
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!fs::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}
std::pair<std::string, ICgroupsReader::CgroupsVersion> getCgroupsPath()
{
auto v2_path = getCgroupsV2PathContainingFile("memory.current");
if (v2_path.has_value())
return {*v2_path, ICgroupsReader::CgroupsVersion::V2};
auto v1_path = getCgroupsV1Path();
if (v1_path.has_value())
return {*v1_path, ICgroupsReader::CgroupsVersion::V1};
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}
}
std::shared_ptr<ICgroupsReader> ICgroupsReader::createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path)
{
if (version == CgroupsVersion::V2)
return std::make_shared<CgroupsV2Reader>(cgroup_path);
else
{
chassert(version == CgroupsVersion::V1);
return std::make_shared<CgroupsV1Reader>(cgroup_path);
}
}
#endif
namespace
{
std::string_view sourceToString(MemoryWorker::MemoryUsageSource source)
{
switch (source)
{
case MemoryWorker::MemoryUsageSource::Cgroups: return "Cgroups";
case MemoryWorker::MemoryUsageSource::Jemalloc: return "Jemalloc";
case MemoryWorker::MemoryUsageSource::None: return "None";
}
}
}
/// We try to pick the best possible supported source for reading memory usage.
/// Supported sources, in order of priority:
/// - reading from cgroups' pseudo-files (fastest and most accurate)
/// - reading jemalloc's resident stat (doesn't account for allocations that bypassed jemalloc)
/// Different tick rates are used because not all sources are equally fast to read.
MemoryWorker::MemoryWorker(uint64_t period_ms_)
: log(getLogger("MemoryWorker"))
, period_ms(period_ms_)
{
#if defined(OS_LINUX)
try
{
static constexpr uint64_t cgroups_memory_usage_tick_ms{50};
const auto [cgroup_path, version] = getCgroupsPath();
LOG_INFO(
getLogger("CgroupsReader"),
"Will create cgroup reader from '{}' (cgroups version: {})",
cgroup_path,
(version == ICgroupsReader::CgroupsVersion::V1) ? "v1" : "v2");
cgroups_reader = ICgroupsReader::createCgroupsReader(version, cgroup_path);
source = MemoryUsageSource::Cgroups;
if (period_ms == 0)
period_ms = cgroups_memory_usage_tick_ms;
return;
}
catch (...)
{
tryLogCurrentException(log, "Cannot use cgroups reader");
}
#endif
#if USE_JEMALLOC
static constexpr uint64_t jemalloc_memory_usage_tick_ms{100};
source = MemoryUsageSource::Jemalloc;
if (period_ms == 0)
period_ms = jemalloc_memory_usage_tick_ms;
#endif
}
MemoryWorker::MemoryUsageSource MemoryWorker::getSource()
{
return source;
}
void MemoryWorker::start()
{
if (source == MemoryUsageSource::None)
return;
LOG_INFO(
getLogger("MemoryWorker"),
"Starting background memory thread with period of {}ms, using {} as source",
period_ms,
sourceToString(source));
background_thread = ThreadFromGlobalPool([this] { backgroundThread(); });
}
MemoryWorker::~MemoryWorker()
{
{
std::unique_lock lock(mutex);
shutdown = true;
}
cv.notify_all();
if (background_thread.joinable())
background_thread.join();
}
uint64_t MemoryWorker::getMemoryUsage()
{
switch (source)
{
case MemoryUsageSource::Cgroups:
return cgroups_reader != nullptr ? cgroups_reader->readMemoryUsage() : 0;
case MemoryUsageSource::Jemalloc:
#if USE_JEMALLOC
return resident_mib.getValue();
#else
return 0;
#endif
case MemoryUsageSource::None:
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Trying to fetch memory usage while no memory source can be used");
}
}
void MemoryWorker::backgroundThread()
{
std::chrono::milliseconds chrono_period_ms{period_ms};
[[maybe_unused]] bool first_run = true;
std::unique_lock lock(mutex);
while (true)
{
cv.wait_for(lock, chrono_period_ms, [this] { return shutdown; });
if (shutdown)
return;
Stopwatch total_watch;
#if USE_JEMALLOC
if (source == MemoryUsageSource::Jemalloc)
epoch_mib.setValue(0);
#endif
Int64 resident = getMemoryUsage();
MemoryTracker::updateRSS(resident);
#if USE_JEMALLOC
if (resident > total_memory_tracker.getHardLimit())
{
Stopwatch purge_watch;
purge_mib.run();
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, purge_watch.elapsedMicroseconds());
}
#endif
#if USE_JEMALLOC
if (unlikely(first_run || total_memory_tracker.get() < 0))
{
if (source != MemoryUsageSource::Jemalloc)
epoch_mib.setValue(0);
MemoryTracker::updateAllocated(allocated_mib.getValue());
}
#endif
ProfileEvents::increment(ProfileEvents::MemoryWorkerRun);
ProfileEvents::increment(ProfileEvents::MemoryWorkerRunElapsedMicroseconds, total_watch.elapsedMicroseconds());
first_run = false;
}
}
}

84
src/Common/MemoryWorker.h Normal file
View File

@ -0,0 +1,84 @@
#pragma once
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/ThreadPool.h>
#include <Common/Jemalloc.h>
namespace DB
{
struct ICgroupsReader
{
enum class CgroupsVersion : uint8_t
{
V1,
V2
};
#if defined(OS_LINUX)
static std::shared_ptr<ICgroupsReader>
createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path);
#endif
virtual ~ICgroupsReader() = default;
virtual uint64_t readMemoryUsage() = 0;
virtual std::string dumpAllStats() = 0;
};
/// Corrects the MemoryTracker based on external information (e.g. cgroups or jemalloc's stats.resident).
/// The worker spawns a background thread which periodically reads the current resident memory from the source
/// and sends that value to the global MemoryTracker.
/// It can also do additional things, like purging jemalloc dirty pages if the current memory usage is higher than the global hard limit.
class MemoryWorker
{
public:
explicit MemoryWorker(uint64_t period_ms_);
enum class MemoryUsageSource : uint8_t
{
None,
Cgroups,
Jemalloc
};
MemoryUsageSource getSource();
void start();
~MemoryWorker();
private:
uint64_t getMemoryUsage();
void backgroundThread();
ThreadFromGlobalPool background_thread;
std::mutex mutex;
std::condition_variable cv;
bool shutdown = false;
LoggerPtr log;
uint64_t period_ms;
MemoryUsageSource source{MemoryUsageSource::None};
std::shared_ptr<ICgroupsReader> cgroups_reader;
#if USE_JEMALLOC
JemallocMibCache<uint64_t> epoch_mib{"epoch"};
JemallocMibCache<size_t> resident_mib{"stats.resident"};
JemallocMibCache<size_t> allocated_mib{"stats.allocated"};
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
JemallocMibCache<size_t> purge_mib{"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge"};
#undef STRINGIFY
#undef STRINGIFY_HELPER
#endif
};
}
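/// Startup sketch (hedged: mirrors how a server might wire the worker; passing 0 lets the
/// constructor pick a source-specific tick rate, 50ms for cgroups or 100ms for jemalloc):
///
///     DB::MemoryWorker memory_worker(/*period_ms_=*/0);
///     memory_worker.start(); /// background thread now periodically calls MemoryTracker::updateRSS()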

Some files were not shown because too many files have changed in this diff.