Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 08:02:02 +00:00)

Commit c5df9f13bf: Merge remote-tracking branch 'ClickHouse/master' into fix-vector-index
@@ -19,6 +19,8 @@
#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@@ -33,6 +35,27 @@ namespace Net
{


    class IHTTPSessionDataHooks
        /// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession.
        /// It allows monitoring, throttling and scheduling data streams with syscall granularity.
    {
    public:
        virtual ~IHTTPSessionDataHooks() = default;

        virtual void atStart(int bytes) = 0;
        /// Called before sending/receiving data `bytes` to/from the socket.

        virtual void atFinish(int bytes) = 0;
        /// Called when sending/receiving of data `bytes` is successfully finished.

        virtual void atFail() = 0;
        /// If an error occurred during send/receive, `atFail()` is called instead of `atFinish()`.
    };


    using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;


    class Net_API HTTPSession
        /// HTTPSession implements basic HTTP session management
        /// for both HTTP clients and HTTP servers.
@@ -73,6 +96,12 @@ namespace Net
        Poco::Timespan getReceiveTimeout() const;
        /// Returns the receive timeout for the HTTP session.

        void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
        /// Sets data hooks that will be called on every send to the socket.

        void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
        /// Sets data hooks that will be called on every receive from the socket.

        bool connected() const;
        /// Returns true if the underlying socket is connected.
@@ -211,6 +240,10 @@ namespace Net
        Poco::Exception * _pException;
        Poco::Any _data;

        // Data hooks
        HTTPSessionDataHooksPtr _sendDataHooks;
        HTTPSessionDataHooksPtr _receiveDataHooks;

        friend class HTTPStreamBuf;
        friend class HTTPHeaderStreamBuf;
        friend class HTTPFixedLengthStreamBuf;
@@ -246,6 +279,16 @@ namespace Net
        return _receiveTimeout;
    }


    inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
    {
        _sendDataHooks = sendDataHooks;
    }


    inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
    {
        _receiveDataHooks = receiveDataHooks;
    }


    inline StreamSocket & HTTPSession::socket()
    {
        return _socket;
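To illustrate how the interface above is meant to be plugged in, here is a minimal sketch (not part of the patch) of a byte-counting hook attached to a client session. The `ByteCountingHooks` class and `attachHooks` helper are hypothetical; only `IHTTPSessionDataHooks`, `HTTPSessionDataHooksPtr`, and the `setSendDataHooks`/`setReceiveDataHooks` setters come from the change itself.

```cpp
#include <atomic>
#include <memory>
#include "Poco/Net/HTTPClientSession.h"

/// Hypothetical hook that simply counts traffic; any IHTTPSessionDataHooks
/// implementation could also throttle or schedule instead of counting.
class ByteCountingHooks : public Poco::Net::IHTTPSessionDataHooks
{
public:
    void atStart(int bytes) override { pending = bytes; }  // about to be handed to the syscall
    void atFinish(int bytes) override { total += bytes; }  // actually transferred
    void atFail() override { pending = 0; }                // syscall threw, nothing accounted

    std::atomic<size_t> total{0};
    int pending = 0;
};

void attachHooks(Poco::Net::HTTPClientSession & session)
{
    auto hooks = std::make_shared<ByteCountingHooks>();
    session.setSendDataHooks(hooks);     // invoked around every sendBytes()
    session.setReceiveDataHooks(hooks);  // invoked around every receiveBytes()
}
```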
@@ -128,14 +128,14 @@ int HTTPSession::get()
{
    if (_pCurrent == _pEnd)
        refill();

    if (_pCurrent < _pEnd)
        return *_pCurrent++;
    else
        return std::char_traits<char>::eof();
}


int HTTPSession::peek()
{
    if (_pCurrent == _pEnd)
@@ -147,7 +147,7 @@ int HTTPSession::peek()
        return std::char_traits<char>::eof();
}


int HTTPSession::read(char* buffer, std::streamsize length)
{
    if (_pCurrent < _pEnd)
@@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
    try
    {
        return _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atStart((int) length);
        int result = _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_sendDataHooks)
            _sendDataHooks->atFail();
        setException(exc);
        throw;
    }
@@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
    try
    {
        return _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atStart(length);
        int result = _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_receiveDataHooks)
            _receiveDataHooks->atFail();
        setException(exc);
        throw;
    }
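Because `atStart()` runs on the calling thread immediately before the `sendBytes()`/`receiveBytes()` syscall, a hook can block there to throttle a session. Below is a rough single-threaded sketch assuming a fixed bytes-per-second budget; the `ThrottlingHooks` name and the rate-limiting scheme are illustrative only and not part of the patch.

```cpp
#include <chrono>
#include <thread>
#include "Poco/Net/HTTPSession.h"

/// Illustrative throttler: delays each send/receive so the average rate stays
/// near max_bytes_per_second. Assumes single-threaded use of the session.
class ThrottlingHooks : public Poco::Net::IHTTPSessionDataHooks
{
public:
    explicit ThrottlingHooks(double max_bytes_per_second_)
        : max_bytes_per_second(max_bytes_per_second_), start(std::chrono::steady_clock::now())
    {
    }

    void atStart(int bytes) override
    {
        // Block the calling thread until the budget allows `bytes` more bytes.
        const double elapsed = std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
        const double allowed = max_bytes_per_second * elapsed;
        if (consumed + bytes > allowed)
            std::this_thread::sleep_for(std::chrono::duration<double>((consumed + bytes - allowed) / max_bytes_per_second));
    }

    void atFinish(int bytes) override { consumed += bytes; }  // account only what was actually transferred
    void atFail() override {}                                 // a failed syscall consumes no budget

private:
    double max_bytes_per_second;
    double consumed = 0;
    std::chrono::steady_clock::time_point start;
};
```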
@@ -63,7 +63,7 @@ bool checkIsBrokenTimeout()

SocketImpl::SocketImpl():
    _sockfd(POCO_INVALID_SOCKET),
    _blocking(true),
    _isBrokenTimeout(checkIsBrokenTimeout())
{
}
@@ -82,7 +82,7 @@ SocketImpl::~SocketImpl()
    close();
}


SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -118,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
        rc = ::connect(_sockfd, address.addr(), address.length());
    }
    while (rc != 0 && lastError() == POCO_EINTR);
    if (rc != 0)
    {
        int err = lastError();
        error(err, address.toString());
@@ -205,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#if defined(POCO_HAVE_IPv6)
    if (address.family() != SocketAddress::IPv6)
        throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");

    if (_sockfd == POCO_INVALID_SOCKET)
    {
        init(address.af());
@@ -226,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#endif
}


void SocketImpl::listen(int backlog)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();

    int rc = ::listen(_sockfd, backlog);
    if (rc != 0) error();
}
@@ -254,7 +254,7 @@ void SocketImpl::shutdownReceive()
    if (rc != 0) error();
}


void SocketImpl::shutdownSend()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -263,7 +263,7 @@ void SocketImpl::shutdownSend()
    if (rc != 0) error();
}


void SocketImpl::shutdown()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -318,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
            throw TimeoutException();
        }
    }

    int rc;
    do
    {
@@ -326,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
        rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
    }
    while (blocking && rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0)
    {
        int err = lastError();
        if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
@@ -364,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
            throw TimeoutException();
        }
    }

    sockaddr_storage abuffer;
    struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
    poco_socklen_t saLen = sizeof(abuffer);
@@ -451,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0) error();
    return rc > 0;

#else
@@ -494,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && errorCode == POCO_EINTR);
    if (rc < 0) error(errorCode);
    return rc > 0;

#endif // POCO_HAVE_FD_POLL
}
@@ -504,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
    Poco::Timespan remainingTime(timeout);
    return pollImpl(remainingTime, mode);
}


void SocketImpl::setSendBufferSize(int size)
{
    setOption(SOL_SOCKET, SO_SNDBUF, size);
}


int SocketImpl::getSendBufferSize()
{
    int result;
@@ -524,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
    setOption(SOL_SOCKET, SO_RCVBUF, size);
}


int SocketImpl::getReceiveBufferSize()
{
    int result;
@@ -570,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
    return result;
}


SocketAddress SocketImpl::address()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -581,7 +581,7 @@ SocketAddress SocketImpl::address()
    int rc = ::getsockname(_sockfd, pSA, &saLen);
    if (rc == 0)
        return SocketAddress(pSA, saLen);
    else
        error();
    return SocketAddress();
}
@@ -18,4 +18,4 @@ target_compile_options (_poco_util
    -Wno-zero-as-null-pointer-constant
)
target_include_directories (_poco_util SYSTEM PUBLIC "include")
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML)
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)
@@ -241,6 +241,20 @@ namespace Util
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

    std::string getHost(const std::string & key) const;
        /// Returns the string value of the host property with the given name.
        /// Throws a NotFoundException if the key does not exist.
        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

    std::string getHost(const std::string & key, const std::string & defaultValue) const;
        /// If a property with the given key exists, returns the host property's string value,
        /// otherwise returns the given default value.
        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

    virtual void setString(const std::string & key, const std::string & value);
        /// Sets the property with the given key to the given value.
        /// An already existing value for the key is overwritten.
@@ -339,12 +353,35 @@ namespace Util
    static bool parseBool(const std::string & value);
    void setRawWithEvent(const std::string & key, std::string value);

    static void checkHostValidity(const std::string & value);
        /// Throws a SyntaxException if the value is not a valid host (IP address or domain).

    virtual ~AbstractConfiguration();

private:
    std::string internalExpand(const std::string & value) const;
    std::string uncheckedExpand(const std::string & value) const;

    static bool isValidIPv4Address(const std::string & value);
        /// An IPv4 address is considered valid if it is "0.0.0.0" or one of the forms
        /// accepted by inet_aton() or inet_addr().

    static bool isValidIPv6Address(const std::string & value);
        /// An IPv6 address is considered valid if it is "::" or one of the forms
        /// accepted by inet_pton() with the AF_INET6 flag
        /// (in this case it may have a scope id and may be surrounded by '[', ']').

    static bool isValidDomainName(const std::string & value);
        /// <domain> ::= <subdomain> [ "." ]
        /// <subdomain> ::= <label> | <subdomain> "." <label>
        /// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
        /// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
        /// <let-dig-hyp> ::= <let-dig> | "-"
        /// <let-dig> ::= <letter> | <digit>
        /// <letter> ::= any one of the 52 alphabetic characters A through Z in
        /// upper case and a through z in lower case
        /// <digit> ::= any one of the ten digits 0 through 9

    AbstractConfiguration(const AbstractConfiguration &);
    AbstractConfiguration & operator=(const AbstractConfiguration &);
@@ -18,6 +18,7 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include "Poco/String.h"
#include "Poco/Net/IPAddressImpl.h"


using Poco::Mutex;
@@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) c
}


std::string AbstractConfiguration::getHost(const std::string& key) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
        throw NotFoundException(key);
}


std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
    {
        checkHostValidity(defaultValue);
        return defaultValue;
    }
}


void AbstractConfiguration::setString(const std::string& key, const std::string& value)
{
    setRawWithEvent(key, value);
@@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string
}


void AbstractConfiguration::checkHostValidity(const std::string& value)
{
    if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
    {
        throw SyntaxException("Property is not a valid host name", value);
    }
}


bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
{
    using Poco::Net::Impl::IPv4AddressImpl;
    IPv4AddressImpl empty4 = IPv4AddressImpl();

    IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
    return ipAddress != empty4 || value == "0.0.0.0";
}


bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
{
#if defined(POCO_HAVE_IPv6)
    using Poco::Net::Impl::IPv6AddressImpl;
    IPv6AddressImpl empty6 = IPv6AddressImpl();

    IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
    return ipAddress != empty6 || value == "::";
#else
    return false;
#endif
}


bool AbstractConfiguration::isValidDomainName(const std::string& value)
{
    if (value.empty() || value == "." || value.length() > 253)
        return false;
    int labelLength = 0;
    char oldChar = 0;

    for (char ch : value)
    {
        if (ch == '.')
        {
            if (labelLength == 0 || labelLength > 63 || oldChar == '-')
                return false;
            labelLength = 0;
        }
        else if (isalnum(ch) || ch == '-')
        {
            if (labelLength == 0 && (ch == '-' || isdigit(ch)))
                return false;
            ++labelLength;
        }
        else
        {
            return false;
        }
        oldChar = ch;
    }
    return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
}


} } // namespace Poco::Util
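For illustration, a hedged usage sketch of the new accessor follows. `Poco::Util::MapConfiguration` is used here only as a convenient stand-in for any `AbstractConfiguration` subclass in this vendored tree; the key names and values are made up, and only `getHost` itself comes from the change above.

```cpp
#include <iostream>
#include "Poco/AutoPtr.h"
#include "Poco/Exception.h"
#include "Poco/Util/MapConfiguration.h"

int main()
{
    Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
    config->setString("primary_host", "clickhouse.example.com");
    config->setString("broken_host", "not a host!");

    // Valid IP addresses and domain names pass validation and are returned unchanged.
    std::cout << config->getHost("primary_host") << '\n';          // clickhouse.example.com
    std::cout << config->getHost("missing", "127.0.0.1") << '\n';  // falls back to the (also validated) default

    try
    {
        config->getHost("broken_host");  // spaces and '!' violate the domain grammar
    }
    catch (const Poco::SyntaxException & e)
    {
        std::cout << "rejected: " << e.displayText() << '\n';
    }
    return 0;
}
```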
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off

docs/changelogs/v24.3.10.33-lts.md (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
|
||||
* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
docs/changelogs/v24.5.7.31-stable.md (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
|
||||
* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
docs/changelogs/v24.6.5.30-stable.md (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
|
||||
* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
docs/changelogs/v24.8.3.59-lts.md (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.8.3.59-lts (e729b9fa40e) FIXME as compared to v24.8.2.3-lts (b54f79ed323)
|
||||
|
||||
#### New Feature
|
||||
* Backported in [#68710](https://github.com/ClickHouse/ClickHouse/issues/68710): Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE` which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#69097](https://github.com/ClickHouse/ClickHouse/issues/69097): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#68973](https://github.com/ClickHouse/ClickHouse/issues/68973): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
|
||||
* Backported in [#68818](https://github.com/ClickHouse/ClickHouse/issues/68818): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Backported in [#68893](https://github.com/ClickHouse/ClickHouse/issues/68893): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68721](https://github.com/ClickHouse/ClickHouse/issues/68721): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Backported in [#69029](https://github.com/ClickHouse/ClickHouse/issues/69029): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#68864](https://github.com/ClickHouse/ClickHouse/issues/68864): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68854](https://github.com/ClickHouse/ClickHouse/issues/68854): Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68790](https://github.com/ClickHouse/ClickHouse/issues/68790): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Backported in [#69108](https://github.com/ClickHouse/ClickHouse/issues/69108): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#68850](https://github.com/ClickHouse/ClickHouse/issues/68850): Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68911](https://github.com/ClickHouse/ClickHouse/issues/68911): Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Backported in [#69160](https://github.com/ClickHouse/ClickHouse/issues/69160): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#69072](https://github.com/ClickHouse/ClickHouse/issues/69072): Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Backported in [#69016](https://github.com/ClickHouse/ClickHouse/issues/69016): Don't use serializations cache in const Dynamic column methods. It could let to use-of-unitialized value or even race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#69120](https://github.com/ClickHouse/ClickHouse/issues/69120): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#68947](https://github.com/ClickHouse/ClickHouse/issues/68947):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#68704](https://github.com/ClickHouse/ClickHouse/issues/68704): Fix enumerating dynamic subcolumns. [#68582](https://github.com/ClickHouse/ClickHouse/pull/68582) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#69000](https://github.com/ClickHouse/ClickHouse/issues/69000): Prioritizing of virtual columns in hive partitioning. [#68606](https://github.com/ClickHouse/ClickHouse/pull/68606) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Backported in [#68799](https://github.com/ClickHouse/ClickHouse/issues/68799): CI: Disable SQLLogic job. [#68654](https://github.com/ClickHouse/ClickHouse/pull/68654) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#68834](https://github.com/ClickHouse/ClickHouse/issues/68834): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#68781](https://github.com/ClickHouse/ClickHouse/issues/68781): Fix flaky test 00989_parallel_parts_loading. [#68737](https://github.com/ClickHouse/ClickHouse/pull/68737) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#68762](https://github.com/ClickHouse/ClickHouse/issues/68762): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#68810](https://github.com/ClickHouse/ClickHouse/issues/68810): Try to disable rerun check if job triggered manually. [#68751](https://github.com/ClickHouse/ClickHouse/pull/68751) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#68962](https://github.com/ClickHouse/ClickHouse/issues/68962): Fix 2477 timeout. [#68752](https://github.com/ClickHouse/ClickHouse/pull/68752) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Backported in [#68977](https://github.com/ClickHouse/ClickHouse/issues/68977): Check setting use_json_alias_for_old_object_type in runtime. [#68793](https://github.com/ClickHouse/ClickHouse/pull/68793) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#68852](https://github.com/ClickHouse/ClickHouse/issues/68852): Make dynamic structure selection more consistent. [#68802](https://github.com/ClickHouse/ClickHouse/pull/68802) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#69052](https://github.com/ClickHouse/ClickHouse/issues/69052): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
@@ -499,7 +499,7 @@ Required parameters:
- `type` — `encrypted`. Otherwise the encrypted disk is not created.
- `disk` — Type of disk for data storage.
- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encode the key in hexadecimal form.
  You can specify multiple keys using the `id` attribute (see example above).
  You can specify multiple keys using the `id` attribute (see example below).

Optional parameters:

@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
|
||||
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
|
||||
|
||||
**Returned values**
|
||||
|
||||
@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
|
||||
|
||||
Type: `UInt8`.
|
||||
|
||||
<a name="sequence-function-pattern-syntax"></a>
|
||||
**Pattern syntax**
|
||||
#### Pattern syntax
|
||||
|
||||
- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.
|
||||
|
||||
@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
|
||||
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
|
||||
|
||||
**Returned values**
|
||||
|
||||
|
@ -0,0 +1,44 @@
|
||||
---
|
||||
slug: /en/sql-reference/aggregate-functions/reference/distinctdynamictypes
|
||||
sidebar_position: 215
|
||||
---
|
||||
|
||||
# distinctDynamicTypes
|
||||
|
||||
Calculates the list of distinct data types stored in [Dynamic](../../data-types/dynamic.md) column.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
distinctDynamicTypes(dynamic)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `dynamic` — [Dynamic](../../data-types/dynamic.md) column.
|
||||
|
||||
**Returned Value**
|
||||
|
||||
- The sorted list of data type names [Array(String)](../../data-types/array.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS test_dynamic;
|
||||
CREATE TABLE test_dynamic(d Dynamic) ENGINE = Memory;
|
||||
INSERT INTO test_dynamic VALUES (42), (NULL), ('Hello'), ([1, 2, 3]), ('2020-01-01'), (map(1, 2)), (43), ([4, 5]), (NULL), ('World'), (map(3, 4))
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT distinctDynamicTypes(d) FROM test_dynamic;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```reference
|
||||
┌─distinctDynamicTypes(d)──────────────────────────────────────┐
|
||||
│ ['Array(Int64)','Date','Int64','Map(UInt8, UInt8)','String'] │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
@ -0,0 +1,125 @@
|
||||
---
|
||||
slug: /en/sql-reference/aggregate-functions/reference/distinctjsonpaths
|
||||
sidebar_position: 216
|
||||
---
|
||||
|
||||
# distinctJSONPaths
|
||||
|
||||
Calculates the list of distinct paths stored in [JSON](../../data-types/newjson.md) column.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
distinctJSONPaths(json)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `json` — [JSON](../../data-types/newjson.md) column.
|
||||
|
||||
**Returned Value**
|
||||
|
||||
- The sorted list of paths [Array(String)](../../data-types/array.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS test_json;
|
||||
CREATE TABLE test_json(json JSON) ENGINE = Memory;
|
||||
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT distinctJSONPaths(json) FROM test_json;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```reference
|
||||
┌─distinctJSONPaths(json)───┐
|
||||
│ ['a','b','c.d.e','c.d.f'] │
|
||||
└───────────────────────────┘
|
||||
```
|
||||
|
||||
# distinctJSONPathsAndTypes
|
||||
|
||||
Calculates the list of distinct paths and their types stored in [JSON](../../data-types/newjson.md) column.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
distinctJSONPathsAndTypes(json)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `json` — [JSON](../../data-types/newjson.md) column.
|
||||
|
||||
**Returned Value**
|
||||
|
||||
- The sorted map of paths and types [Map(String, Array(String))](../../data-types/map.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS test_json;
|
||||
CREATE TABLE test_json(json JSON) ENGINE = Memory;
|
||||
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```reference
|
||||
┌─distinctJSONPathsAndTypes(json)───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ {'a':['Int64'],'b':['Array(Nullable(Int64))','String'],'c.d.e':['Date'],'c.d.f':['Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))']} │
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Note**
|
||||
|
||||
If JSON declaration contains paths with specified types, these paths will be always included in the result of `distinctJSONPaths/distinctJSONPathsAndTypes` functions even if input data didn't have values for these paths.
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS test_json;
|
||||
CREATE TABLE test_json(json JSON(a UInt32)) ENGINE = Memory;
|
||||
INSERT INTO test_json VALUES ('{"b" : "Hello"}'), ('{"b" : "World", "c" : [1, 2, 3]}');
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT json FROM test_json;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─json──────────────────────────────────┐
|
||||
│ {"a":0,"b":"Hello"} │
|
||||
│ {"a":0,"b":"World","c":["1","2","3"]} │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT distinctJSONPaths(json) FROM test_json;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─distinctJSONPaths(json)─┐
|
||||
│ ['a','b','c'] │
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─distinctJSONPathsAndTypes(json)────────────────────────────────┐
|
||||
│ {'a':['UInt32'],'b':['String'],'c':['Array(Nullable(Int64))']} │
|
||||
└────────────────────────────────────────────────────────────────┘
|
||||
```
|
@@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we

### During merges of data parts in MergeTree table engines

During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains
During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what paths will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contain
the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.

Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:
@ -505,7 +505,130 @@ As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and move
|
||||
|
||||
## Introspection functions
|
||||
|
||||
There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes).
|
||||
There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes), [distinctDynamicTypes](../aggregate-functions/reference/distinctdynamictypes.md), [distinctJSONPaths and distinctJSONPathsAndTypes](../aggregate-functions/reference/distinctjsonpaths.md)
|
||||
|
||||
**Examples**
|
||||
|
||||
Let's investigate the content of [GH Archive](https://www.gharchive.org/) dataset for `2020-01-01` date:
|
||||
|
||||
```sql
|
||||
SELECT arrayJoin(distinctJSONPaths(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject)
|
||||
```
|
||||
|
||||
```text
|
||||
┌─arrayJoin(distinctJSONPaths(json))─────────────────────────┐
|
||||
│ actor.avatar_url │
|
||||
│ actor.display_login │
|
||||
│ actor.gravatar_id │
|
||||
│ actor.id │
|
||||
│ actor.login │
|
||||
│ actor.url │
|
||||
│ created_at │
|
||||
│ id │
|
||||
│ org.avatar_url │
|
||||
│ org.gravatar_id │
|
||||
│ org.id │
|
||||
│ org.login │
|
||||
│ org.url │
|
||||
│ payload.action │
|
||||
│ payload.before │
|
||||
│ payload.comment._links.html.href │
|
||||
│ payload.comment._links.pull_request.href │
|
||||
│ payload.comment._links.self.href │
|
||||
│ payload.comment.author_association │
|
||||
│ payload.comment.body │
|
||||
│ payload.comment.commit_id │
|
||||
│ payload.comment.created_at │
|
||||
│ payload.comment.diff_hunk │
|
||||
│ payload.comment.html_url │
|
||||
│ payload.comment.id │
|
||||
│ payload.comment.in_reply_to_id │
|
||||
│ payload.comment.issue_url │
|
||||
│ payload.comment.line │
|
||||
│ payload.comment.node_id │
|
||||
│ payload.comment.original_commit_id │
|
||||
│ payload.comment.original_position │
|
||||
│ payload.comment.path │
|
||||
│ payload.comment.position │
|
||||
│ payload.comment.pull_request_review_id │
|
||||
...
|
||||
│ payload.release.node_id │
|
||||
│ payload.release.prerelease │
|
||||
│ payload.release.published_at │
|
||||
│ payload.release.tag_name │
|
||||
│ payload.release.tarball_url │
|
||||
│ payload.release.target_commitish │
|
||||
│ payload.release.upload_url │
|
||||
│ payload.release.url │
|
||||
│ payload.release.zipball_url │
|
||||
│ payload.size │
|
||||
│ public │
|
||||
│ repo.id │
|
||||
│ repo.name │
|
||||
│ repo.url │
|
||||
│ type │
|
||||
└─arrayJoin(distinctJSONPaths(json))─────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject) SETTINGS date_time_input_format='best_effort'
|
||||
```
|
||||
|
||||
|
||||
```text
|
||||
┌─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┐
|
||||
│ ('actor.avatar_url',['String']) │
|
||||
│ ('actor.display_login',['String']) │
|
||||
│ ('actor.gravatar_id',['String']) │
|
||||
│ ('actor.id',['Int64']) │
|
||||
│ ('actor.login',['String']) │
|
||||
│ ('actor.url',['String']) │
|
||||
│ ('created_at',['DateTime']) │
|
||||
│ ('id',['String']) │
|
||||
│ ('org.avatar_url',['String']) │
|
||||
│ ('org.gravatar_id',['String']) │
|
||||
│ ('org.id',['Int64']) │
|
||||
│ ('org.login',['String']) │
|
||||
│ ('org.url',['String']) │
|
||||
│ ('payload.action',['String']) │
|
||||
│ ('payload.before',['String']) │
|
||||
│ ('payload.comment._links.html.href',['String']) │
|
||||
│ ('payload.comment._links.pull_request.href',['String']) │
|
||||
│ ('payload.comment._links.self.href',['String']) │
|
||||
│ ('payload.comment.author_association',['String']) │
|
||||
│ ('payload.comment.body',['String']) │
|
||||
│ ('payload.comment.commit_id',['String']) │
|
||||
│ ('payload.comment.created_at',['DateTime']) │
|
||||
│ ('payload.comment.diff_hunk',['String']) │
|
||||
│ ('payload.comment.html_url',['String']) │
|
||||
│ ('payload.comment.id',['Int64']) │
|
||||
│ ('payload.comment.in_reply_to_id',['Int64']) │
|
||||
│ ('payload.comment.issue_url',['String']) │
|
||||
│ ('payload.comment.line',['Int64']) │
|
||||
│ ('payload.comment.node_id',['String']) │
|
||||
│ ('payload.comment.original_commit_id',['String']) │
|
||||
│ ('payload.comment.original_position',['Int64']) │
|
||||
│ ('payload.comment.path',['String']) │
|
||||
│ ('payload.comment.position',['Int64']) │
|
||||
│ ('payload.comment.pull_request_review_id',['Int64']) │
|
||||
...
|
||||
│ ('payload.release.node_id',['String']) │
|
||||
│ ('payload.release.prerelease',['Bool']) │
|
||||
│ ('payload.release.published_at',['DateTime']) │
|
||||
│ ('payload.release.tag_name',['String']) │
|
||||
│ ('payload.release.tarball_url',['String']) │
|
||||
│ ('payload.release.target_commitish',['String']) │
|
||||
│ ('payload.release.upload_url',['String']) │
|
||||
│ ('payload.release.url',['String']) │
|
||||
│ ('payload.release.zipball_url',['String']) │
|
||||
│ ('payload.size',['Int64']) │
|
||||
│ ('public',['Bool']) │
|
||||
│ ('repo.id',['Int64']) │
|
||||
│ ('repo.name',['String']) │
|
||||
│ ('repo.url',['String']) │
|
||||
│ ('type',['String']) │
|
||||
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
|
||||
```
|
||||
|
||||
## Tips for better usage of the JSON type
|
||||
|
||||
|
@ -2019,7 +2019,7 @@ Alias: `dateTrunc`.
|
||||
|
||||
`unit` argument is case-insensitive.
|
||||
|
||||
- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
|
||||
- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
@ -8,14 +8,14 @@ slug: /en/guides/developer/transactional
This is transactional (ACID) if the inserted rows are packed and inserted as a single block (see Notes):
- Atomic: an INSERT succeeds or is rejected as a whole: if a confirmation is sent to the client, then all rows were inserted; if an error is sent to the client, then no rows were inserted.
- Consistent: if there are no table constraints violated, then all rows in an INSERT are inserted and the INSERT succeeds; if constraints are violated, then no rows are inserted.
- Isolated: concurrent clients observe a consistent snapshot of the table–the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen
- Isolated: concurrent clients observe a consistent snapshot of the table–the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen. Clients inside of another transaction have [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation), while clients outside of a transaction have [read uncommitted](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Read_uncommitted) isolation level.
- Durable: a successful INSERT is written to the filesystem before answering to the client, on a single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
- INSERT into multiple tables with one statement is possible if materialized views are involved (the INSERT from the client is to a table which has associated materialized views).

## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family

Same as Case 1 above, with this detail:
- If table has many partitions and INSERT covers many partitions–then insertion into every partition is transactional on its own
- If table has many partitions and INSERT covers many partitions, then insertion into every partition is transactional on its own


## Case 3: INSERT into one distributed table of the MergeTree* family
@ -38,7 +38,7 @@ Same as Case 1 above, with this detail:
- the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
- ClickHouse is using MVCC with snapshot isolation internally
- ClickHouse is using [MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) with [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) internally for concurrent transactions
- all ACID properties are valid even in the case of server kill/crash
- either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in the typical setup
- "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
@ -260,7 +260,7 @@ FROM mergetree_table
### Transactions introspection

You can inspect transactions by querying the `system.transactions` table, but note that you cannot query that
table from a session that is in a transaction–open a second `clickhouse client` session to query that table.
table from a session that is in a transaction. Open a second `clickhouse client` session to query that table.

```sql
SELECT *
@ -677,4 +677,122 @@ void GetAllChildrenNumberCommand::execute(const ASTKeeperQuery * query, KeeperCl
|
||||
std::cout << totalNumChildren << "\n";
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
class CPMVOperation
|
||||
{
|
||||
constexpr static UInt64 kTryLimit = 1000;
|
||||
|
||||
public:
|
||||
CPMVOperation(String src_, String dest_, bool remove_src_, KeeperClient * client_)
|
||||
: src(std::move(src_)), dest(std::move(dest_)), remove_src(remove_src_), client(client_)
|
||||
{
|
||||
}
|
||||
|
||||
bool isTryLimitReached() const { return failed_tries_count >= kTryLimit; }
|
||||
|
||||
bool isCompleted() const { return is_completed; }
|
||||
|
||||
void perform()
|
||||
{
|
||||
Coordination::Stat src_stat;
|
||||
String data = client->zookeeper->get(src, &src_stat);
|
||||
|
||||
Coordination::Requests ops{
|
||||
zkutil::makeCheckRequest(src, src_stat.version),
|
||||
zkutil::makeCreateRequest(dest, data, zkutil::CreateMode::Persistent), // Do we need to copy ACLs here?
|
||||
};
|
||||
|
||||
if (remove_src)
|
||||
ops.push_back(zkutil::makeRemoveRequest(src, src_stat.version));
|
||||
|
||||
Coordination::Responses responses;
|
||||
auto code = client->zookeeper->tryMulti(ops, responses);
|
||||
|
||||
switch (code)
|
||||
{
|
||||
case Coordination::Error::ZOK: {
|
||||
is_completed = true;
|
||||
return;
|
||||
}
|
||||
case Coordination::Error::ZBADVERSION: {
|
||||
++failed_tries_count;
|
||||
|
||||
if (isTryLimitReached())
|
||||
zkutil::KeeperMultiException::check(code, ops, responses);
|
||||
|
||||
return;
|
||||
}
|
||||
default:
|
||||
zkutil::KeeperMultiException::check(code, ops, responses);
|
||||
}
|
||||
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable");
|
||||
}
|
||||
|
||||
private:
|
||||
String src;
|
||||
String dest;
|
||||
bool remove_src = false;
|
||||
KeeperClient * client = nullptr;
|
||||
|
||||
bool is_completed = false;
|
||||
uint64_t failed_tries_count = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
bool CPCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, [[maybe_unused]] Expected & expected) const
|
||||
{
|
||||
String src_path;
|
||||
if (!parseKeeperPath(pos, expected, src_path))
|
||||
return false;
|
||||
node->args.push_back(std::move(src_path));
|
||||
|
||||
String to_path;
|
||||
if (!parseKeeperPath(pos, expected, to_path))
|
||||
return false;
|
||||
node->args.push_back(std::move(to_path));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void CPCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
|
||||
{
|
||||
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
|
||||
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
|
||||
|
||||
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/false, /*client_=*/client);
|
||||
|
||||
while (!operation.isTryLimitReached() && !operation.isCompleted())
|
||||
operation.perform();
|
||||
}
|
||||
|
||||
bool MVCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
|
||||
{
|
||||
String src_path;
|
||||
if (!parseKeeperPath(pos, expected, src_path))
|
||||
return false;
|
||||
node->args.push_back(std::move(src_path));
|
||||
|
||||
String to_path;
|
||||
if (!parseKeeperPath(pos, expected, to_path))
|
||||
return false;
|
||||
node->args.push_back(std::move(to_path));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void MVCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
|
||||
{
|
||||
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
|
||||
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
|
||||
|
||||
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/true, /*client_=*/client);
|
||||
|
||||
while (!operation.isTryLimitReached() && !operation.isCompleted())
|
||||
operation.perform();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -266,4 +266,32 @@ class GetAllChildrenNumberCommand : public IKeeperClientCommand
|
||||
}
|
||||
};
|
||||
|
||||
class CPCommand : public IKeeperClientCommand
|
||||
{
|
||||
String getName() const override { return "cp"; }
|
||||
|
||||
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
|
||||
|
||||
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
|
||||
|
||||
String getHelpMessage() const override
|
||||
{
|
||||
return "{} <src> <dest> -- Copies 'src' node to 'dest' path.";
|
||||
}
|
||||
};
|
||||
|
||||
class MVCommand : public IKeeperClientCommand
|
||||
{
|
||||
String getName() const override { return "mv"; }
|
||||
|
||||
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
|
||||
|
||||
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
|
||||
|
||||
String getHelpMessage() const override
|
||||
{
|
||||
return "{} <src> <dest> -- Moves 'src' node to the 'dest' path.";
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -212,6 +212,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
|
||||
std::make_shared<FourLetterWordCommand>(),
|
||||
std::make_shared<GetDirectChildrenNumberCommand>(),
|
||||
std::make_shared<GetAllChildrenNumberCommand>(),
|
||||
std::make_shared<CPCommand>(),
|
||||
std::make_shared<MVCommand>(),
|
||||
});
|
||||
|
||||
String home_path;
|
||||
|
@ -68,7 +68,10 @@ public:
|
||||
if (data().isEqualTo(to.data()))
|
||||
counter += to.counter;
|
||||
else if (!data().has() || counter < to.counter)
|
||||
{
|
||||
data().set(to.data(), arena);
|
||||
counter = to.counter - counter;
|
||||
}
|
||||
else
|
||||
counter -= to.counter;
|
||||
}
|
||||
|
src/AggregateFunctions/AggregateFunctionDistinctDynamicTypes.cpp (new file, 161 lines)
@ -0,0 +1,161 @@
|
||||
#include <unordered_set>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypesBinaryEncoding.h>
|
||||
#include <Columns/ColumnDynamic.h>
|
||||
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int TOO_LARGE_ARRAY_SIZE;
|
||||
}
|
||||
|
||||
struct AggregateFunctionDistinctDynamicTypesData
|
||||
{
|
||||
constexpr static size_t MAX_ARRAY_SIZE = 0xFFFFFF;
|
||||
|
||||
std::unordered_set<String> data;
|
||||
|
||||
void add(const String & type)
|
||||
{
|
||||
data.insert(type);
|
||||
}
|
||||
|
||||
void merge(const AggregateFunctionDistinctDynamicTypesData & other)
|
||||
{
|
||||
data.insert(other.data.begin(), other.data.end());
|
||||
}
|
||||
|
||||
void serialize(WriteBuffer & buf) const
|
||||
{
|
||||
writeVarUInt(data.size(), buf);
|
||||
for (const auto & type : data)
|
||||
writeStringBinary(type, buf);
|
||||
}
|
||||
|
||||
void deserialize(ReadBuffer & buf)
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
if (size > MAX_ARRAY_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", MAX_ARRAY_SIZE, size);
|
||||
|
||||
data.reserve(size);
|
||||
String type;
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
{
|
||||
readStringBinary(type, buf);
|
||||
data.insert(type);
|
||||
}
|
||||
}
|
||||
|
||||
void insertResultInto(IColumn & column)
|
||||
{
|
||||
/// Insert types in sorted order for better output.
|
||||
auto & array_column = assert_cast<ColumnArray &>(column);
|
||||
auto & string_column = assert_cast<ColumnString &>(array_column.getData());
|
||||
std::vector<String> sorted_data(data.begin(), data.end());
|
||||
std::sort(sorted_data.begin(), sorted_data.end());
|
||||
for (const auto & type : sorted_data)
|
||||
string_column.insertData(type.data(), type.size());
|
||||
array_column.getOffsets().push_back(string_column.size());
|
||||
}
|
||||
};
|
||||
|
||||
/// Calculates the list of distinct data types in Dynamic column.
|
||||
class AggregateFunctionDistinctDynamicTypes final : public IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>
|
||||
{
|
||||
public:
|
||||
explicit AggregateFunctionDistinctDynamicTypes(const DataTypes & argument_types_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>(argument_types_, {}, std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()))
|
||||
{
|
||||
}
|
||||
|
||||
String getName() const override { return "distinctDynamicTypes"; }
|
||||
|
||||
bool allocatesMemoryInArena() const override { return false; }
|
||||
|
||||
void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
||||
{
|
||||
const auto & dynamic_column = assert_cast<const ColumnDynamic & >(*columns[0]);
|
||||
if (dynamic_column.isNullAt(row_num))
|
||||
return;
|
||||
|
||||
data(place).add(dynamic_column.getTypeNameAt(row_num));
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE addBatchSinglePlace(
|
||||
size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
|
||||
const override
|
||||
{
|
||||
if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
|
||||
IAggregateFunctionDataHelper::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
|
||||
/// Optimization for the case when we add all rows from the column into a single place.
|
||||
/// In this case we can avoid iterating over all rows because we can get all types
|
||||
/// in Dynamic column in a more efficient way.
|
||||
else
|
||||
assert_cast<const ColumnDynamic & >(*columns[0]).getAllTypeNamesInto(data(place).data);
|
||||
}
|
||||
|
||||
void addManyDefaults(
|
||||
AggregateDataPtr __restrict /*place*/,
|
||||
const IColumn ** /*columns*/,
|
||||
size_t /*length*/,
|
||||
Arena * /*arena*/) const override
|
||||
{
|
||||
/// Default value for Dynamic is NULL, so nothing to add.
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
|
||||
{
|
||||
data(place).merge(data(rhs));
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
data(place).serialize(buf);
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
|
||||
{
|
||||
data(place).deserialize(buf);
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
data(place).insertResultInto(to);
|
||||
}
|
||||
};
|
||||
|
||||
AggregateFunctionPtr createAggregateFunctionDistinctDynamicTypes(
|
||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
{
|
||||
assertNoParameters(name, parameters);
|
||||
if (argument_types.size() != 1)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Incorrect number of arguments for aggregate function {}. Expected single argument with type Dynamic, got {} arguments", name, argument_types.size());
|
||||
|
||||
if (!isDynamic(argument_types[0]))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type Dynamic", argument_types[0]->getName(), name);
|
||||
|
||||
return std::make_shared<AggregateFunctionDistinctDynamicTypes>(argument_types);
|
||||
}
|
||||
|
||||
void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction("distinctDynamicTypes", createAggregateFunctionDistinctDynamicTypes);
|
||||
}
|
||||
|
||||
}
|
src/AggregateFunctions/AggregateFunctionDistinctJSONPaths.cpp (new file, 350 lines)
@ -0,0 +1,350 @@
|
||||
#include <unordered_set>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeMap.h>
|
||||
#include <DataTypes/DataTypeObject.h>
|
||||
#include <DataTypes/DataTypesBinaryEncoding.h>
|
||||
#include <Columns/ColumnDynamic.h>
|
||||
#include <Columns/ColumnObject.h>
|
||||
#include <Columns/ColumnMap.h>
|
||||
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int TOO_LARGE_ARRAY_SIZE;
|
||||
}
|
||||
|
||||
constexpr static size_t DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE = 0xFFFFFF;
|
||||
|
||||
|
||||
struct AggregateFunctionDistinctJSONPathsData
|
||||
{
|
||||
static constexpr auto name = "distinctJSONPaths";
|
||||
|
||||
std::unordered_set<String> data;
|
||||
|
||||
void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> &)
|
||||
{
|
||||
for (const auto & [path, _] : column.getTypedPaths())
|
||||
data.insert(path);
|
||||
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
|
||||
{
|
||||
/// Add path from dynamic paths only if it's not NULL in this row.
|
||||
if (!dynamic_column->isNullAt(row_num))
|
||||
data.insert(path);
|
||||
}
|
||||
|
||||
/// Iterate over paths in shared data in this row.
|
||||
const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
|
||||
const auto & shared_data_offsets = column.getSharedDataOffsets();
|
||||
const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
|
||||
const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
|
||||
for (size_t i = start; i != end; ++i)
|
||||
data.insert(shared_data_paths->getDataAt(i).toString());
|
||||
}
|
||||
|
||||
void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> &)
|
||||
{
|
||||
for (const auto & [path, _] : column.getTypedPaths())
|
||||
data.insert(path);
|
||||
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
|
||||
{
|
||||
/// Add dynamic path only if it has at least one non-null value.
|
||||
/// getNumberOfDefaultRows for Dynamic column is O(1).
|
||||
if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
|
||||
data.insert(path);
|
||||
}
|
||||
|
||||
/// Iterate over all paths in shared data.
|
||||
const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
|
||||
for (size_t i = 0; i != shared_data_paths->size(); ++i)
|
||||
data.insert(shared_data_paths->getDataAt(i).toString());
|
||||
}
|
||||
|
||||
void merge(const AggregateFunctionDistinctJSONPathsData & other)
|
||||
{
|
||||
data.insert(other.data.begin(), other.data.end());
|
||||
}
|
||||
|
||||
void serialize(WriteBuffer & buf) const
|
||||
{
|
||||
writeVarUInt(data.size(), buf);
|
||||
for (const auto & path : data)
|
||||
writeStringBinary(path, buf);
|
||||
}
|
||||
|
||||
void deserialize(ReadBuffer & buf)
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
if (size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, size);
|
||||
|
||||
String path;
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
{
|
||||
readStringBinary(path, buf);
|
||||
data.insert(path);
|
||||
}
|
||||
}
|
||||
|
||||
void insertResultInto(IColumn & column)
|
||||
{
|
||||
/// Insert paths in sorted order for better output.
|
||||
auto & array_column = assert_cast<ColumnArray &>(column);
|
||||
auto & string_column = assert_cast<ColumnString &>(array_column.getData());
|
||||
std::vector<String> sorted_data(data.begin(), data.end());
|
||||
std::sort(sorted_data.begin(), sorted_data.end());
|
||||
for (const auto & path : sorted_data)
|
||||
string_column.insertData(path.data(), path.size());
|
||||
array_column.getOffsets().push_back(string_column.size());
|
||||
}
|
||||
|
||||
static DataTypePtr getResultType()
|
||||
{
|
||||
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
|
||||
}
|
||||
};
|
||||
|
||||
struct AggregateFunctionDistinctJSONPathsAndTypesData
|
||||
{
|
||||
static constexpr auto name = "distinctJSONPathsAndTypes";
|
||||
|
||||
std::unordered_map<String, std::unordered_set<String>> data;
|
||||
|
||||
void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> & typed_paths_type_names)
|
||||
{
|
||||
for (const auto & [path, _] : column.getTypedPaths())
|
||||
data[path].insert(typed_paths_type_names.at(path));
|
||||
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
|
||||
{
|
||||
if (!dynamic_column->isNullAt(row_num))
|
||||
data[path].insert(dynamic_column->getTypeNameAt(row_num));
|
||||
}
|
||||
|
||||
/// Iterate over paths in shared data in this row and decode the data types.
|
||||
const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
|
||||
const auto & shared_data_offsets = column.getSharedDataOffsets();
|
||||
const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
|
||||
const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
|
||||
for (size_t i = start; i != end; ++i)
|
||||
{
|
||||
auto path = shared_data_paths->getDataAt(i).toString();
|
||||
auto value = shared_data_values->getDataAt(i);
|
||||
ReadBufferFromMemory buf(value.data, value.size);
|
||||
auto type = decodeDataType(buf);
|
||||
/// We should not have Nulls here but let's check just in case.
|
||||
chassert(!isNothing(type));
|
||||
data[path].insert(type->getName());
|
||||
}
|
||||
}
|
||||
|
||||
void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> & typed_paths_type_names)
|
||||
{
|
||||
for (const auto & [path, _] : column.getTypedPaths())
|
||||
data[path].insert(typed_paths_type_names.at(path));
|
||||
for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
|
||||
{
|
||||
/// Add dynamic path only if it has at least one non-null value.
|
||||
/// getNumberOfDefaultRows for Dynamic column is O(1).
|
||||
if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
|
||||
dynamic_column->getAllTypeNamesInto(data[path]);
|
||||
}
|
||||
|
||||
/// Iterate over all paths in shared data and decode the data types.
|
||||
const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
|
||||
for (size_t i = 0; i != shared_data_paths->size(); ++i)
|
||||
{
|
||||
auto path = shared_data_paths->getDataAt(i).toString();
|
||||
auto value = shared_data_values->getDataAt(i);
|
||||
ReadBufferFromMemory buf(value.data, value.size);
|
||||
auto type = decodeDataType(buf);
|
||||
/// We should not have Nulls here but let's check just in case.
|
||||
chassert(!isNothing(type));
|
||||
data[path].insert(type->getName());
|
||||
}
|
||||
}
|
||||
|
||||
void merge(const AggregateFunctionDistinctJSONPathsAndTypesData & other)
|
||||
{
|
||||
for (const auto & [path, types] : other.data)
|
||||
data[path].insert(types.begin(), types.end());
|
||||
}
|
||||
|
||||
void serialize(WriteBuffer & buf) const
|
||||
{
|
||||
writeVarUInt(data.size(), buf);
|
||||
for (const auto & [path, types] : data)
|
||||
{
|
||||
writeStringBinary(path, buf);
|
||||
writeVarUInt(types.size(), buf);
|
||||
for (const auto & type : types)
|
||||
writeStringBinary(type, buf);
|
||||
}
|
||||
}
|
||||
|
||||
void deserialize(ReadBuffer & buf)
|
||||
{
|
||||
size_t paths_size, types_size;
|
||||
readVarUInt(paths_size, buf);
|
||||
if (paths_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for paths (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, paths_size);
|
||||
|
||||
data.reserve(paths_size);
|
||||
String path, type;
|
||||
for (size_t i = 0; i != paths_size; ++i)
|
||||
{
|
||||
readStringBinary(path, buf);
|
||||
readVarUInt(types_size, buf);
|
||||
if (types_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for types (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, types_size);
|
||||
|
||||
data[path].reserve(types_size);
|
||||
for (size_t j = 0; j != types_size; ++j)
|
||||
{
|
||||
readStringBinary(type, buf);
|
||||
data[path].insert(type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void insertResultInto(IColumn & column)
|
||||
{
|
||||
/// Insert sorted paths and types for better output.
|
||||
auto & array_column = assert_cast<ColumnMap &>(column).getNestedColumn();
|
||||
auto & tuple_column = assert_cast<ColumnTuple &>(array_column.getData());
|
||||
auto & key_column = assert_cast<ColumnString &>(tuple_column.getColumn(0));
|
||||
auto & value_column = assert_cast<ColumnArray &>(tuple_column.getColumn(1));
|
||||
auto & value_column_data = assert_cast<ColumnString &>(value_column.getData());
|
||||
std::vector<std::pair<String, std::vector<String>>> sorted_data;
|
||||
sorted_data.reserve(data.size());
|
||||
for (const auto & [path, types] : data)
|
||||
{
|
||||
std::vector<String> sorted_types(types.begin(), types.end());
|
||||
std::sort(sorted_types.begin(), sorted_types.end());
|
||||
sorted_data.emplace_back(path, std::move(sorted_types));
|
||||
}
|
||||
std::sort(sorted_data.begin(), sorted_data.end());
|
||||
|
||||
for (const auto & [path, types] : sorted_data)
|
||||
{
|
||||
key_column.insertData(path.data(), path.size());
|
||||
for (const auto & type : types)
|
||||
value_column_data.insertData(type.data(), type.size());
|
||||
value_column.getOffsets().push_back(value_column_data.size());
|
||||
}
|
||||
|
||||
array_column.getOffsets().push_back(key_column.size());
|
||||
}
|
||||
|
||||
static DataTypePtr getResultType()
|
||||
{
|
||||
return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()));
|
||||
}
|
||||
};
|
||||
|
||||
/// Calculates the list of distinct paths or pairs (path, type) in JSON column.
|
||||
template <typename Data>
|
||||
class AggregateFunctionDistinctJSONPathsAndTypes final : public IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>
|
||||
{
|
||||
public:
|
||||
explicit AggregateFunctionDistinctJSONPathsAndTypes(const DataTypes & argument_types_)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>(
|
||||
argument_types_, {}, Data::getResultType())
|
||||
{
|
||||
const auto & typed_paths_types = assert_cast<const DataTypeObject &>(*argument_types_[0]).getTypedPaths();
|
||||
typed_paths_type_names.reserve(typed_paths_types.size());
|
||||
for (const auto & [path, type] : typed_paths_types)
|
||||
typed_paths_type_names[path] = type->getName();
|
||||
}
|
||||
|
||||
String getName() const override { return Data::name; }
|
||||
|
||||
bool allocatesMemoryInArena() const override { return false; }
|
||||
|
||||
void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
||||
{
|
||||
const auto & object_column = assert_cast<const ColumnObject & >(*columns[0]);
|
||||
this->data(place).add(object_column, row_num, typed_paths_type_names);
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE addBatchSinglePlace(
|
||||
size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
|
||||
const override
|
||||
{
|
||||
if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
|
||||
IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
|
||||
/// Optimization for the case when we add all rows from the column into a single place.
|
||||
/// In this case we can avoid iterating over all rows because we can get all paths
|
||||
/// and types in JSON column in a more efficient way.
|
||||
else
|
||||
this->data(place).addWholeColumn(assert_cast<const ColumnObject & >(*columns[0]), typed_paths_type_names);
|
||||
}
|
||||
|
||||
void addManyDefaults(
|
||||
AggregateDataPtr __restrict /*place*/,
|
||||
const IColumn ** /*columns*/,
|
||||
size_t /*length*/,
|
||||
Arena * /*arena*/) const override
|
||||
{
|
||||
/// Default value for JSON is empty object, so nothing to add.
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
|
||||
{
|
||||
this->data(place).merge(this->data(rhs));
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
this->data(place).serialize(buf);
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
|
||||
{
|
||||
this->data(place).deserialize(buf);
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
this->data(place).insertResultInto(to);
|
||||
}
|
||||
|
||||
private:
|
||||
std::unordered_map<String, String> typed_paths_type_names;
|
||||
};
|
||||
|
||||
template <typename Data>
|
||||
AggregateFunctionPtr createAggregateFunctionDistinctJSONPathsAndTypes(
|
||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
{
|
||||
assertNoParameters(name, parameters);
|
||||
if (argument_types.size() != 1)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Incorrect number of arguments for aggregate function {}. Expected single argument with type JSON, got {} arguments", name, argument_types.size());
|
||||
|
||||
if (!isObject(argument_types[0]))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type JSON", argument_types[0]->getName(), name);
|
||||
|
||||
return std::make_shared<AggregateFunctionDistinctJSONPathsAndTypes<Data>>(argument_types);
|
||||
}
|
||||
|
||||
void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction("distinctJSONPaths", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsData>);
|
||||
factory.registerFunction("distinctJSONPathsAndTypes", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsAndTypesData>);
|
||||
}
|
||||
|
||||
}
|
@ -89,6 +89,8 @@ void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionFlameGraph(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory);
|
||||
void registerAggregateFunctionLargestTriangleThreeBuckets(AggregateFunctionFactory & factory);
|
||||
void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory);
|
||||
void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory);
|
||||
|
||||
class AggregateFunctionCombinatorFactory;
|
||||
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
|
||||
@ -191,6 +193,8 @@ void registerAggregateFunctions()
|
||||
registerAggregateFunctionFlameGraph(factory);
|
||||
registerAggregateFunctionKolmogorovSmirnovTest(factory);
|
||||
registerAggregateFunctionLargestTriangleThreeBuckets(factory);
|
||||
registerAggregateFunctionDistinctDynamicTypes(factory);
|
||||
registerAggregateFunctionDistinctJSONPathsAndTypes(factory);
|
||||
|
||||
registerWindowFunctions(factory);
|
||||
}
|
||||
|
@ -2699,14 +2699,6 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name)
|
||||
ReadBufferFromFile in(file_name);
|
||||
readStringUntilEOF(queries_from_file, in);
|
||||
|
||||
if (!getClientConfiguration().has("log_comment"))
|
||||
{
|
||||
Settings settings = client_context->getSettingsCopy();
|
||||
/// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]"
|
||||
settings.log_comment = fs::absolute(fs::path(file_name));
|
||||
client_context->setSettings(settings);
|
||||
}
|
||||
|
||||
return executeMultiQuery(queries_from_file);
|
||||
}
|
||||
|
||||
|
@ -979,6 +979,41 @@ ColumnPtr ColumnDynamic::compress() const
|
||||
});
|
||||
}
|
||||
|
||||
String ColumnDynamic::getTypeNameAt(size_t row_num) const
|
||||
{
|
||||
const auto & variant_col = getVariantColumn();
|
||||
const size_t discr = variant_col.globalDiscriminatorAt(row_num);
|
||||
if (discr == ColumnVariant::NULL_DISCRIMINATOR)
|
||||
return "";
|
||||
|
||||
if (discr == getSharedVariantDiscriminator())
|
||||
{
|
||||
const auto value = getSharedVariant().getDataAt(variant_col.offsetAt(row_num));
|
||||
ReadBufferFromMemory buf(value.data, value.size);
|
||||
return decodeDataType(buf)->getName();
|
||||
}
|
||||
|
||||
return variant_info.variant_names[discr];
|
||||
}
|
||||
|
||||
void ColumnDynamic::getAllTypeNamesInto(std::unordered_set<String> & names) const
|
||||
{
|
||||
auto shared_variant_discr = getSharedVariantDiscriminator();
|
||||
for (size_t i = 0; i != variant_info.variant_names.size(); ++i)
|
||||
{
|
||||
if (i != shared_variant_discr && !variant_column_ptr->getVariantByGlobalDiscriminator(i).empty())
|
||||
names.insert(variant_info.variant_names[i]);
|
||||
}
|
||||
|
||||
const auto & shared_variant = getSharedVariant();
|
||||
for (size_t i = 0; i != shared_variant.size(); ++i)
|
||||
{
|
||||
const auto value = shared_variant.getDataAt(i);
|
||||
ReadBufferFromMemory buf(value.data, value.size);
|
||||
names.insert(decodeDataType(buf)->getName());
|
||||
}
|
||||
}
|
||||
|
||||
void ColumnDynamic::prepareForSquashing(const Columns & source_columns)
|
||||
{
|
||||
if (source_columns.empty())
|
||||
|
@ -430,6 +430,9 @@ public:
|
||||
|
||||
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) { return getVariantSerialization(variant_type, variant_type->getName()); }
|
||||
|
||||
String getTypeNameAt(size_t row_num) const;
|
||||
void getAllTypeNamesInto(std::unordered_set<String> & names) const;
|
||||
|
||||
private:
|
||||
void createVariantInfo(const DataTypePtr & variant_type);
|
||||
|
||||
|
@ -75,9 +75,9 @@
|
||||
M(GlobalThread, "Number of threads in global thread pool.") \
|
||||
M(GlobalThreadActive, "Number of threads in global thread pool running a task.") \
|
||||
M(GlobalThreadScheduled, "Number of queued or active jobs in global thread pool.") \
|
||||
M(LocalThread, "Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
|
||||
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
|
||||
M(LocalThreadScheduled, "Number of queued or active jobs in local thread pools.") \
|
||||
M(LocalThread, "Obsolete. Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
|
||||
M(LocalThreadActive, "Obsolete. Number of threads in local thread pools running a task.") \
|
||||
M(LocalThreadScheduled, "Obsolete. Number of queued or active jobs in local thread pools.") \
|
||||
M(MergeTreeDataSelectExecutorThreads, "Number of threads in the MergeTreeDataSelectExecutor thread pool.") \
|
||||
M(MergeTreeDataSelectExecutorThreadsActive, "Number of threads in the MergeTreeDataSelectExecutor thread pool running a task.") \
|
||||
M(MergeTreeDataSelectExecutorThreadsScheduled, "Number of queued or active jobs in the MergeTreeDataSelectExecutor thread pool.") \
|
||||
@ -292,6 +292,9 @@
|
||||
M(DistrCacheWriteRequests, "Number of executed Write requests to Distributed Cache") \
|
||||
M(DistrCacheServerConnections, "Number of open connections to ClickHouse server from Distributed Cache") \
|
||||
\
|
||||
M(SchedulerIOReadScheduled, "Number of IO reads are being scheduled currently") \
|
||||
M(SchedulerIOWriteScheduled, "Number of IO writes are being scheduled currently") \
|
||||
\
|
||||
M(StorageConnectionsStored, "Total count of sessions stored in the session pool for storages") \
|
||||
M(StorageConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for storages") \
|
||||
\
|
||||
|
@ -113,6 +113,56 @@ std::string_view CurrentThread::getQueryId()
|
||||
return current_thread->getQueryId();
|
||||
}
|
||||
|
||||
void CurrentThread::attachReadResource(ResourceLink link)
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return;
|
||||
if (current_thread->read_resource_link)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to read resource", std::to_string(getThreadId()));
|
||||
current_thread->read_resource_link = link;
|
||||
}
|
||||
|
||||
void CurrentThread::detachReadResource()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return;
|
||||
if (!current_thread->read_resource_link)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to read resource", std::to_string(getThreadId()));
|
||||
current_thread->read_resource_link.reset();
|
||||
}
|
||||
|
||||
ResourceLink CurrentThread::getReadResourceLink()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return {};
|
||||
return current_thread->read_resource_link;
|
||||
}
|
||||
|
||||
void CurrentThread::attachWriteResource(ResourceLink link)
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return;
|
||||
if (current_thread->write_resource_link)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to write resource", std::to_string(getThreadId()));
|
||||
current_thread->write_resource_link = link;
|
||||
}
|
||||
|
||||
void CurrentThread::detachWriteResource()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return;
|
||||
if (!current_thread->write_resource_link)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to write resource", std::to_string(getThreadId()));
|
||||
current_thread->write_resource_link.reset();
|
||||
}
|
||||
|
||||
ResourceLink CurrentThread::getWriteResourceLink()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return {};
|
||||
return current_thread->write_resource_link;
|
||||
}
|
||||
|
||||
MemoryTracker * CurrentThread::getUserMemoryTracker()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Common/ThreadStatus.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
@ -23,7 +24,6 @@ class QueryStatus;
|
||||
struct Progress;
|
||||
class InternalTextLogsQueue;
|
||||
|
||||
|
||||
/** Collection of static methods to work with thread-local objects.
|
||||
* Allows to attach and detach query/process (thread group) to a thread
|
||||
* (to calculate query-related metrics and to allow to obtain query-related data from a thread).
|
||||
@ -92,6 +92,14 @@ public:
|
||||
|
||||
static std::string_view getQueryId();
|
||||
|
||||
// For IO Scheduling
|
||||
static void attachReadResource(ResourceLink link);
|
||||
static void detachReadResource();
|
||||
static ResourceLink getReadResourceLink();
|
||||
static void attachWriteResource(ResourceLink link);
|
||||
static void detachWriteResource();
|
||||
static ResourceLink getWriteResourceLink();
|
||||
|
||||
/// Initializes query with current thread as master thread in constructor, and detaches it in destructor
|
||||
struct QueryScope : private boost::noncopyable
|
||||
{
|
||||
@ -102,6 +110,39 @@ public:
|
||||
void logPeakMemoryUsage();
|
||||
bool log_peak_memory_usage_in_destructor = true;
|
||||
};
|
||||
|
||||
/// Scoped attach/detach of IO resource links
|
||||
struct IOScope : private boost::noncopyable
|
||||
{
|
||||
explicit IOScope(ResourceLink read_resource_link, ResourceLink write_resource_link)
|
||||
{
|
||||
if (read_resource_link)
|
||||
{
|
||||
attachReadResource(read_resource_link);
|
||||
read_attached = true;
|
||||
}
|
||||
if (write_resource_link)
|
||||
{
|
||||
attachWriteResource(write_resource_link);
|
||||
write_attached = true;
|
||||
}
|
||||
}
|
||||
|
||||
explicit IOScope(const IOSchedulingSettings & settings)
|
||||
: IOScope(settings.read_resource_link, settings.write_resource_link)
|
||||
{}
|
||||
|
||||
~IOScope()
|
||||
{
|
||||
if (read_attached)
|
||||
detachReadResource();
|
||||
if (write_attached)
|
||||
detachWriteResource();
|
||||
}
|
||||
|
||||
bool read_attached = false;
|
||||
bool write_attached = false;
|
||||
};
|
||||
};
|
||||
|
||||
}
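
Taken together with the new `CurrentThread::attach*/detach*Resource()` methods above, `IOScope` is meant to bracket one piece of IO work. The following is only a minimal usage sketch (hypothetical call site; it assumes the `IOSchedulingSettings` type referenced by the constructor in this diff), not code from this commit:

```cpp
#include <Common/CurrentThread.h>

/// Hypothetical helper: attach IO resource links for the duration of one IO
/// operation so that per-thread consumers (e.g. the HTTP session data hooks
/// introduced in this commit) can pick them up via CurrentThread.
void doScheduledIO(const DB::IOSchedulingSettings & settings)
{
    DB::CurrentThread::IOScope io_scope(settings);  // attaches read/write links if they are set

    // ... perform reads/writes here; CurrentThread::getReadResourceLink() and
    // CurrentThread::getWriteResourceLink() now return the attached links ...

}   // io_scope detaches the links automatically on scope exit
```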
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <Common/HostResolvePool.h>
|
||||
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Exception.h>
|
||||
@ -9,6 +10,7 @@
|
||||
#include <Common/ProxyConfiguration.h>
|
||||
#include <Common/MemoryTrackerSwitcher.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <Common/proxyConfigurationToPocoProxyConfig.h>
|
||||
|
||||
#include <Poco/Net/HTTPChunkedStream.h>
|
||||
@ -236,6 +238,59 @@ public:
|
||||
};
|
||||
|
||||
|
||||
// Session data hooks implementation for integration with resource scheduler.
|
||||
// Hooks are created per every request-response pair and are registered/unregistered in HTTP session.
|
||||
// * `atStart()` sends a resource request to the scheduler every time the HTTP session is going to send or receive
|
||||
// data to/from the socket. `atStart()` waits for the scheduler confirmation. This way the scheduler can
|
||||
// throttle and/or schedule socket data streams.
|
||||
// * `atFinish()` hook is called on successful socket read/write operation.
|
||||
// It informs the scheduler that the operation is complete, which allows the scheduler to control the total
|
||||
// amount of in-flight bytes and/or operations.
|
||||
// * `atFail()` hook is called on failure of socket operation. The purpose is to correct the amount of bytes
|
||||
// passed through the scheduler queue to ensure fair bandwidth allocation even in presence of errors.
|
||||
struct ResourceGuardSessionDataHooks : public Poco::Net::IHTTPSessionDataHooks
|
||||
{
|
||||
ResourceGuardSessionDataHooks(ResourceLink link_, const ResourceGuard::Metrics * metrics, LoggerPtr log_, const String & method, const String & uri)
|
||||
: link(link_)
|
||||
, log(log_)
|
||||
, http_request(method + " " + uri)
|
||||
{
|
||||
request.metrics = metrics;
|
||||
chassert(link);
|
||||
}
|
||||
|
||||
~ResourceGuardSessionDataHooks() override
|
||||
{
|
||||
request.assertFinished(); // Never destruct with an active request
|
||||
}
|
||||
|
||||
void atStart(int bytes) override
|
||||
{
|
||||
Stopwatch timer;
|
||||
request.enqueue(bytes, link);
|
||||
request.wait();
|
||||
timer.stop();
|
||||
if (timer.elapsedMilliseconds() >= 5000)
|
||||
LOG_INFO(log, "Resource request took too long to finish: {} ms for {}", timer.elapsedMilliseconds(), http_request);
|
||||
}
|
||||
|
||||
void atFinish(int bytes) override
|
||||
{
|
||||
request.finish(bytes, link);
|
||||
}
|
||||
|
||||
void atFail() override
|
||||
{
|
||||
request.finish(0, link);
|
||||
}
|
||||
|
||||
ResourceLink link;
|
||||
ResourceGuard::Request request;
|
||||
LoggerPtr log;
|
||||
String http_request;
|
||||
};
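
The hook interface used above is deliberately small. As a minimal sketch (assuming only the `IHTTPSessionDataHooks` interface added in this commit; `CountingSessionDataHooks` is a hypothetical name, not part of the change), an implementation that merely counts traffic instead of scheduling it could look like this:

```cpp
#include <atomic>
#include <cstdint>
#include <memory>

#include <Poco/Net/HTTPSession.h>

/// Illustration only: hooks that count bytes and failures, following the
/// atStart/atFinish/atFail contract described above.
struct CountingSessionDataHooks : public Poco::Net::IHTTPSessionDataHooks
{
    std::atomic<uint64_t> started_bytes{0};   // bytes announced before each send/receive
    std::atomic<uint64_t> finished_bytes{0};  // bytes transferred successfully
    std::atomic<uint64_t> failures{0};        // failed socket operations

    void atStart(int bytes) override { started_bytes += static_cast<uint64_t>(bytes); }
    void atFinish(int bytes) override { finished_bytes += static_cast<uint64_t>(bytes); }
    void atFail() override { ++failures; }
};

/// Usage sketch: session.setSendDataHooks(std::make_shared<CountingSessionDataHooks>());
```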
|
||||
|
||||
|
||||
// EndpointConnectionPool manage connections to the endpoint
|
||||
// Features:
|
||||
// - it uses HostResolver for address selecting. See Common/HostResolver.h for more info.
|
||||
@ -246,8 +301,6 @@ public:
|
||||
// - `Session::reconnect()` uses the pool as well
|
||||
// - comprehensive sensors
|
||||
// - a session is reused according to its inner state, automatically
|
||||
|
||||
|
||||
template <class Session>
|
||||
class EndpointConnectionPool : public std::enable_shared_from_this<EndpointConnectionPool<Session>>, public IExtendedPool
|
||||
{
|
||||
@ -337,6 +390,13 @@ private:
|
||||
std::ostream & sendRequest(Poco::Net::HTTPRequest & request) override
|
||||
{
|
||||
auto idle = idleTime();
|
||||
|
||||
// Set data hooks for IO scheduling
|
||||
if (ResourceLink link = CurrentThread::getReadResourceLink())
|
||||
Session::setReceiveDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIORead(), log, request.getMethod(), request.getURI()));
|
||||
if (ResourceLink link = CurrentThread::getWriteResourceLink())
|
||||
Session::setSendDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIOWrite(), log, request.getMethod(), request.getURI()));
|
||||
|
||||
std::ostream & result = Session::sendRequest(request);
|
||||
result.exceptions(std::ios::badbit);
|
||||
|
||||
@ -393,6 +453,8 @@ private:
|
||||
}
|
||||
}
|
||||
response_stream = nullptr;
|
||||
Session::setSendDataHooks();
|
||||
Session::setReceiveDataHooks();
|
||||
|
||||
group->atConnectionDestroy();
|
||||
|
||||
|
@ -86,6 +86,20 @@
|
||||
M(NetworkReceiveBytes, "Total number of bytes received from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
|
||||
M(NetworkSendBytes, "Total number of bytes send to network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
|
||||
\
|
||||
M(GlobalThreadPoolExpansions, "Counts the total number of times new threads have been added to the global thread pool. This metric indicates the frequency of expansions in the global thread pool to accommodate increased processing demands.") \
|
||||
M(GlobalThreadPoolShrinks, "Counts the total number of times the global thread pool has shrunk by removing threads. This occurs when the number of idle threads exceeds max_thread_pool_free_size, indicating adjustments in the global thread pool size in response to decreased thread utilization.") \
|
||||
M(GlobalThreadPoolThreadCreationMicroseconds, "Total time spent waiting for new threads to start.") \
|
||||
M(GlobalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the global thread pool.") \
|
||||
M(GlobalThreadPoolJobs, "Counts the number of jobs that have been pushed to the global thread pool.") \
|
||||
M(GlobalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
|
||||
M(LocalThreadPoolExpansions, "Counts the total number of times threads have been borrowed from the global thread pool to expand local thread pools.") \
|
||||
M(LocalThreadPoolShrinks, "Counts the total number of times threads have been returned to the global thread pool from local thread pools.") \
|
||||
M(LocalThreadPoolThreadCreationMicroseconds, "Total time local thread pools have spent waiting to borrow a thread from the global pool.") \
|
||||
M(LocalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the local thread pools.") \
|
||||
M(LocalThreadPoolJobs, "Counts the number of jobs that have been pushed to the local thread pools.") \
|
||||
M(LocalThreadPoolBusyMicroseconds, "Total time threads have spent executing the actual work.") \
|
||||
M(LocalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
|
||||
\
|
||||
M(DiskS3GetRequestThrottlerCount, "Number of DiskS3 GET and SELECT requests passed through throttler.") \
|
||||
M(DiskS3GetRequestThrottlerSleepMicroseconds, "Total time a query was sleeping to conform DiskS3 GET and SELECT request throttling.") \
|
||||
M(DiskS3PutRequestThrottlerCount, "Number of DiskS3 PUT, COPY, POST and LIST requests passed through throttler.") \
|
||||
@ -106,6 +120,13 @@
|
||||
M(PartsWithAppliedMutationsOnFly, "Total number of parts for which there was any mutation applied on fly") \
|
||||
M(MutationsAppliedOnFlyInAllParts, "The sum of number of applied mutations on-fly for part among all read parts") \
|
||||
\
|
||||
M(SchedulerIOReadRequests, "Resource requests passed through scheduler for IO reads.") \
|
||||
M(SchedulerIOReadBytes, "Bytes passed through scheduler for IO reads.") \
|
||||
M(SchedulerIOReadWaitMicroseconds, "Total time a query was waiting on resource requests for IO reads.") \
|
||||
M(SchedulerIOWriteRequests, "Resource requests passed through scheduler for IO writes.") \
|
||||
M(SchedulerIOWriteBytes, "Bytes passed through scheduler for IO writes.") \
|
||||
M(SchedulerIOWriteWaitMicroseconds, "Total time a query was waiting on resource requests for IO writes.") \
|
||||
\
|
||||
M(QueryMaskingRulesMatch, "Number of times query masking rules was successfully matched.") \
|
||||
\
|
||||
M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \
|
||||
|
@ -34,13 +34,16 @@ bool ProgressIndication::updateProgress(const Progress & value)
|
||||
|
||||
void ProgressIndication::resetProgress()
|
||||
{
|
||||
watch.restart();
|
||||
progress.reset();
|
||||
show_progress_bar = false;
|
||||
written_progress_chars = 0;
|
||||
write_progress_on_update = false;
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
progress.reset();
|
||||
show_progress_bar = false;
|
||||
written_progress_chars = 0;
|
||||
write_progress_on_update = false;
|
||||
}
|
||||
{
|
||||
std::lock_guard lock(profile_events_mutex);
|
||||
watch.restart();
|
||||
cpu_usage_meter.reset(getElapsedNanoseconds());
|
||||
hosts_data.clear();
|
||||
}
|
||||
@ -90,6 +93,8 @@ ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const
|
||||
|
||||
void ProgressIndication::writeFinalProgress()
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
|
||||
if (progress.read_rows < 1000)
|
||||
return;
|
||||
|
||||
@ -271,6 +276,8 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
|
||||
|
||||
void ProgressIndication::clearProgressOutput(WriteBufferFromFileDescriptor & message)
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
|
||||
if (written_progress_chars)
|
||||
{
|
||||
written_progress_chars = 0;
|
||||
|
@ -115,6 +115,8 @@ private:
|
||||
/// Concurrent access is possible to the following:
|
||||
/// - writeProgress() (class properties) (guarded with progress_mutex)
|
||||
/// - hosts_data/cpu_usage_meter (guarded with profile_events_mutex)
|
||||
///
|
||||
/// It is also possible to have more races if query is cancelled, so that clearProgressOutput() is called concurrently
|
||||
mutable std::mutex profile_events_mutex;
|
||||
mutable std::mutex progress_mutex;
|
||||
|
||||
|
@ -22,10 +22,13 @@ public:
|
||||
{}
|
||||
|
||||
// Wrapper for `enqueueRequest()` that should be used to account for available resource budget
|
||||
void enqueueRequestUsingBudget(ResourceRequest * request)
|
||||
// Returns `estimated_cost` that should be passed later to `adjustBudget()`
|
||||
[[ nodiscard ]] ResourceCost enqueueRequestUsingBudget(ResourceRequest * request)
|
||||
{
|
||||
request->cost = budget.ask(request->cost);
|
||||
ResourceCost estimated_cost = request->cost;
|
||||
request->cost = budget.ask(estimated_cost);
|
||||
enqueueRequest(request);
|
||||
return estimated_cost;
|
||||
}
|
||||
|
||||
// Should be called to account for difference between real and estimated costs
|
||||
@ -34,18 +37,6 @@ public:
|
||||
budget.adjust(estimated_cost, real_cost);
|
||||
}
|
||||
|
||||
// Adjust budget to account for extra consumption of `cost` resource units
|
||||
void consumeBudget(ResourceCost cost)
|
||||
{
|
||||
adjustBudget(0, cost);
|
||||
}
|
||||
|
||||
// Adjust budget to account for requested, but not consumed `cost` resource units
|
||||
void accumulateBudget(ResourceCost cost)
|
||||
{
|
||||
adjustBudget(cost, 0);
|
||||
}
|
||||
|
||||
/// Enqueue new request to be executed using underlying resource.
|
||||
/// Should be called outside of scheduling subsystem, implementation must be thread-safe.
|
||||
virtual void enqueueRequest(ResourceRequest * request) = 0;
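
The comments above describe a two-step accounting protocol: reserve budget with an estimated cost at enqueue time, then reconcile once the real cost is known. A condensed sketch of a caller follows (hypothetical function; in this commit the real caller is `ResourceGuard`, and the queue type is assumed to be `ISchedulerQueue`):

```cpp
#include <Common/Scheduler/ISchedulerQueue.h>

/// Hypothetical caller, for illustration only.
void enqueueAndReconcile(DB::ISchedulerQueue & queue, DB::ResourceRequest * request, DB::ResourceCost real_cost)
{
    // Reserve budget using the estimated cost currently stored in the request.
    DB::ResourceCost estimated_cost = queue.enqueueRequestUsingBudget(request);

    // ... wait until the scheduler grants the request and perform the IO ...

    // Reconcile: account for the difference between the estimate and reality.
    queue.adjustBudget(estimated_cost, real_cost);
}
```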
|
||||
|
@ -232,12 +232,13 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
ResourceTestManager & t;
|
||||
|
||||
Guard(ResourceTestManager & t_, ResourceLink link_, ResourceCost cost)
|
||||
: ResourceGuard(link_, cost, PostponeLocking)
|
||||
: ResourceGuard(ResourceGuard::Metrics::getIOWrite(), link_, cost, Lock::Defer)
|
||||
, t(t_)
|
||||
{
|
||||
t.onEnqueue(link);
|
||||
lock();
|
||||
t.onExecute(link);
|
||||
consume(cost);
|
||||
}
|
||||
};
|
||||
|
||||
@ -310,8 +311,9 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
// NOTE: actually leader's request(s) make their own small busy period.
|
||||
void blockResource(ResourceLink link)
|
||||
{
|
||||
ResourceGuard g(link, 1, ResourceGuard::PostponeLocking);
|
||||
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, 1, ResourceGuard::Lock::Defer);
|
||||
g.lock();
|
||||
g.consume(1);
|
||||
// NOTE: at this point we assume resource to be blocked by single request (<max_requests>1</max_requests>)
|
||||
busy_period.arrive_and_wait(); // (1) notify all followers that resource is blocked
|
||||
busy_period.arrive_and_wait(); // (2) wait all followers to enqueue their requests
|
||||
@ -320,10 +322,11 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
{
|
||||
getLinkData(link).left += total_requests + 1;
|
||||
busy_period.arrive_and_wait(); // (1) wait leader to block resource
|
||||
ResourceGuard g(link, cost, ResourceGuard::PostponeLocking);
|
||||
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, cost, ResourceGuard::Lock::Defer);
|
||||
onEnqueue(link);
|
||||
busy_period.arrive_and_wait(); // (2) notify leader to unblock
|
||||
g.lock();
|
||||
g.consume(cost);
|
||||
onExecute(link);
|
||||
}
|
||||
};
|
||||
|
@ -36,11 +36,16 @@ TEST(SchedulerDynamicResourceManager, Smoke)
|
||||
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
ResourceGuard gA(cA->get("res1"), ResourceGuard::PostponeLocking);
|
||||
ResourceGuard gA(ResourceGuard::Metrics::getIOWrite(), cA->get("res1"), 1, ResourceGuard::Lock::Defer);
|
||||
gA.lock();
|
||||
gA.consume(1);
|
||||
gA.unlock();
|
||||
|
||||
ResourceGuard gB(cB->get("res1"));
|
||||
ResourceGuard gB(ResourceGuard::Metrics::getIOWrite(), cB->get("res1"));
|
||||
gB.unlock();
|
||||
|
||||
ResourceGuard gC(ResourceGuard::Metrics::getIORead(), cB->get("res1"));
|
||||
gB.consume(2);
|
||||
}
|
||||
}
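
For readers skimming the test changes, the two locking modes of the reworked `ResourceGuard` API can be summarized as follows. This is a condensed sketch based on the usages above; `link` and `bytes` are placeholders for any `ResourceLink` and cost, and the function itself is hypothetical:

```cpp
/// Illustration of the two locking modes (sketch, not a test from this commit).
void illustrateLockingModes(DB::ResourceLink link, DB::ResourceCost bytes)
{
    {
        // Default: the resource request is enqueued and waited for in the constructor.
        DB::ResourceGuard guard(DB::ResourceGuard::Metrics::getIOWrite(), link, bytes);
        guard.consume(bytes);
    }   // the request is finished automatically when the guard goes out of scope

    {
        // Lock::Defer: tests only, not exception-safe; enqueue now, lock explicitly later.
        DB::ResourceGuard guard(DB::ResourceGuard::Metrics::getIORead(), link, bytes, DB::ResourceGuard::Lock::Defer);
        guard.lock();
        guard.consume(bytes);
    }
}
```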
|
||||
|
||||
|
@ -1,11 +1,13 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <Common/Scheduler/SchedulerRoot.h>
|
||||
|
||||
#include <Common/Scheduler/Nodes/tests/ResourceTest.h>
|
||||
|
||||
#include <Common/Scheduler/SchedulerRoot.h>
|
||||
#include <Common/randomSeed.h>
|
||||
|
||||
#include <barrier>
|
||||
#include <future>
|
||||
#include <pcg_random.hpp>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
@ -22,6 +24,17 @@ struct ResourceTest : public ResourceTestBase
|
||||
{
|
||||
scheduler.stop(true);
|
||||
}
|
||||
|
||||
std::mutex rng_mutex;
|
||||
pcg64 rng{randomSeed()};
|
||||
|
||||
template <typename T>
|
||||
T randomInt(T from, T to)
|
||||
{
|
||||
std::uniform_int_distribution<T> distribution(from, to);
|
||||
std::lock_guard lock(rng_mutex);
|
||||
return distribution(rng);
|
||||
}
|
||||
};
|
||||
|
||||
struct ResourceHolder
|
||||
@ -109,26 +122,55 @@ TEST(SchedulerRoot, Smoke)
|
||||
r2.registerResource();
|
||||
|
||||
{
|
||||
ResourceGuard rg(a);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a);
|
||||
EXPECT_TRUE(fc1->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(b);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), b);
|
||||
EXPECT_TRUE(fc1->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(c);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), c);
|
||||
EXPECT_TRUE(fc2->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(d);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), d);
|
||||
EXPECT_TRUE(fc2->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SchedulerRoot, Budget)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
ResourceHolder r1(t);
|
||||
r1.add<ConstraintTest>("/", "<max_requests>1</max_requests>");
|
||||
r1.add<PriorityPolicy>("/prio");
|
||||
auto a = r1.addQueue("/prio/A", "");
|
||||
r1.registerResource();
|
||||
|
||||
ResourceCost total_real_cost = 0;
|
||||
int total_requests = 10;
|
||||
for (int i = 0 ; i < total_requests; i++)
|
||||
{
|
||||
ResourceCost est_cost = t.randomInt(1, 10);
|
||||
ResourceCost real_cost = t.randomInt(0, 10);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a, est_cost);
|
||||
rg.consume(real_cost);
|
||||
total_real_cost += real_cost;
|
||||
}
|
||||
|
||||
EXPECT_EQ(total_requests, a.queue->dequeued_requests);
|
||||
EXPECT_EQ(total_real_cost, a.queue->dequeued_cost - a.queue->getBudget());
|
||||
}
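The assertion at the end of the Budget test encodes the accounting the scheduler's budget is meant to preserve; spelled out as a worked note (illustrative, not part of the patch):

// Each request is charged its (possibly budget-adjusted) estimated cost when dequeued, and
// finish() later reconciles the difference against the real cost through the queue budget.
// Hence, once all requests complete:
//
//     sum(real costs)  ==  dequeued_cost  -  queue budget
//
// which is exactly EXPECT_EQ(total_real_cost, a.queue->dequeued_cost - a.queue->getBudget()).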
|
||||
|
||||
TEST(SchedulerRoot, Cancel)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
@ -1,25 +0,0 @@
|
||||
#include <Common/Scheduler/ISchedulerQueue.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
#include <Common/Scheduler/ResourceRequest.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
void ResourceLink::adjust(ResourceCost estimated_cost, ResourceCost real_cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->adjustBudget(estimated_cost, real_cost);
|
||||
}
|
||||
|
||||
void ResourceLink::consumed(ResourceCost cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->consumeBudget(cost);
|
||||
}
|
||||
|
||||
void ResourceLink::accumulate(DB::ResourceCost cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->accumulateBudget(cost);
|
||||
}
|
||||
}
|
||||
|
@ -7,10 +7,30 @@
|
||||
#include <Common/Scheduler/ResourceRequest.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
|
||||
#include <Common/CurrentThread.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event SchedulerIOReadRequests;
|
||||
extern const Event SchedulerIOReadBytes;
|
||||
extern const Event SchedulerIOReadWaitMicroseconds;
|
||||
extern const Event SchedulerIOWriteRequests;
|
||||
extern const Event SchedulerIOWriteBytes;
|
||||
extern const Event SchedulerIOWriteWaitMicroseconds;
|
||||
}
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric SchedulerIOReadScheduled;
|
||||
extern const Metric SchedulerIOWriteScheduled;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -22,12 +42,42 @@ namespace DB
|
||||
class ResourceGuard
|
||||
{
|
||||
public:
|
||||
enum ResourceGuardCtor
|
||||
enum class Lock
|
||||
{
|
||||
LockStraightAway, /// Locks inside constructor (default)
|
||||
Default, /// Locks inside constructor
|
||||
|
||||
// WARNING: Only for tests. It is not exception-safe because `lock()` must be called after construction.
|
||||
PostponeLocking /// Don't lock in constructor, but send request
|
||||
Defer /// Don't lock in constructor, but send request
|
||||
};
|
||||
|
||||
struct Metrics
|
||||
{
|
||||
const ProfileEvents::Event requests = ProfileEvents::end();
|
||||
const ProfileEvents::Event cost = ProfileEvents::end();
|
||||
const ProfileEvents::Event wait_microseconds = ProfileEvents::end();
|
||||
const CurrentMetrics::Metric scheduled_count = CurrentMetrics::end();
|
||||
|
||||
static const Metrics * getIORead()
|
||||
{
|
||||
static Metrics metrics{
|
||||
.requests = ProfileEvents::SchedulerIOReadRequests,
|
||||
.cost = ProfileEvents::SchedulerIOReadBytes,
|
||||
.wait_microseconds = ProfileEvents::SchedulerIOReadWaitMicroseconds,
|
||||
.scheduled_count = CurrentMetrics::SchedulerIOReadScheduled
|
||||
};
|
||||
return &metrics;
|
||||
}
|
||||
|
||||
static const Metrics * getIOWrite()
|
||||
{
|
||||
static Metrics metrics{
|
||||
.requests = ProfileEvents::SchedulerIOWriteRequests,
|
||||
.cost = ProfileEvents::SchedulerIOWriteBytes,
|
||||
.wait_microseconds = ProfileEvents::SchedulerIOWriteWaitMicroseconds,
|
||||
.scheduled_count = CurrentMetrics::SchedulerIOWriteScheduled
|
||||
};
|
||||
return &metrics;
|
||||
}
|
||||
};
|
||||
|
||||
enum RequestState
|
||||
@ -46,60 +96,74 @@ public:
|
||||
chassert(state == Finished);
|
||||
state = Enqueued;
|
||||
ResourceRequest::reset(cost_);
|
||||
link_.queue->enqueueRequestUsingBudget(this);
|
||||
estimated_cost = link_.queue->enqueueRequestUsingBudget(this); // NOTE: it modifies `cost` and enqueues request
|
||||
}
|
||||
|
||||
// This function is executed inside scheduler thread and wakes thread issued this `request`.
|
||||
// That thread will continue execution and do real consumption of requested resource synchronously.
|
||||
void execute() override
|
||||
{
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
chassert(state == Enqueued);
|
||||
state = Dequeued;
|
||||
}
|
||||
std::unique_lock lock(mutex);
|
||||
chassert(state == Enqueued);
|
||||
state = Dequeued;
|
||||
dequeued_cv.notify_one();
|
||||
}
|
||||
|
||||
void wait()
|
||||
{
|
||||
CurrentMetrics::Increment scheduled(metrics->scheduled_count);
|
||||
auto timer = CurrentThread::getProfileEvents().timer(metrics->wait_microseconds);
|
||||
std::unique_lock lock(mutex);
|
||||
dequeued_cv.wait(lock, [this] { return state == Dequeued; });
|
||||
}
|
||||
|
||||
void finish()
|
||||
void finish(ResourceCost real_cost_, ResourceLink link_)
|
||||
{
|
||||
// lock(mutex) is not required because `Dequeued` request cannot be used by the scheduler thread
|
||||
chassert(state == Dequeued);
|
||||
state = Finished;
|
||||
if (estimated_cost != real_cost_)
|
||||
link_.queue->adjustBudget(estimated_cost, real_cost_);
|
||||
ResourceRequest::finish();
|
||||
ProfileEvents::increment(metrics->requests);
|
||||
ProfileEvents::increment(metrics->cost, real_cost_);
|
||||
}
|
||||
|
||||
static Request & local()
|
||||
void assertFinished()
|
||||
{
|
||||
// lock(mutex) is not required because `Finished` request cannot be used by the scheduler thread
|
||||
chassert(state == Finished);
|
||||
}
|
||||
|
||||
static Request & local(const Metrics * metrics)
|
||||
{
|
||||
// Since single thread cannot use more than one resource request simultaneously,
|
||||
// we can reuse thread-local request to avoid allocations
|
||||
static thread_local Request instance;
|
||||
instance.metrics = metrics;
|
||||
return instance;
|
||||
}
|
||||
|
||||
const Metrics * metrics = nullptr; // Must be initialized before use
|
||||
|
||||
private:
|
||||
ResourceCost estimated_cost = 0; // Stores initial `cost` value in case budget was used to modify it
|
||||
std::mutex mutex;
|
||||
std::condition_variable dequeued_cv;
|
||||
RequestState state = Finished;
|
||||
};
|
||||
|
||||
/// Creates pending request for resource; blocks while resource is not available (unless `PostponeLocking`)
|
||||
explicit ResourceGuard(ResourceLink link_, ResourceCost cost = 1, ResourceGuardCtor ctor = LockStraightAway)
|
||||
/// Creates pending request for resource; blocks while resource is not available (unless `Lock::Defer`)
|
||||
explicit ResourceGuard(const Metrics * metrics, ResourceLink link_, ResourceCost cost = 1, ResourceGuard::Lock type = ResourceGuard::Lock::Default)
|
||||
: link(link_)
|
||||
, request(Request::local())
|
||||
, request(Request::local(metrics))
|
||||
{
|
||||
if (cost == 0)
|
||||
link.queue = nullptr; // Ignore zero-cost requests
|
||||
else if (link.queue)
|
||||
link.reset(); // Ignore zero-cost requests
|
||||
else if (link)
|
||||
{
|
||||
request.enqueue(cost, link);
|
||||
if (ctor == LockStraightAway)
|
||||
if (type == Lock::Default)
|
||||
request.wait();
|
||||
}
|
||||
}
|
||||
@ -112,22 +176,29 @@ public:
|
||||
/// Blocks until resource is available
|
||||
void lock()
|
||||
{
|
||||
if (link.queue)
|
||||
if (link)
|
||||
request.wait();
|
||||
}
|
||||
|
||||
/// Report resource consumption has finished
|
||||
void unlock()
|
||||
void consume(ResourceCost cost)
|
||||
{
|
||||
if (link.queue)
|
||||
real_cost += cost;
|
||||
}
|
||||
|
||||
/// Report resource consumption has finished
|
||||
void unlock(ResourceCost consumed = 0)
|
||||
{
|
||||
consume(consumed);
|
||||
if (link)
|
||||
{
|
||||
request.finish();
|
||||
link.queue = nullptr;
|
||||
request.finish(real_cost, link);
|
||||
link.reset();
|
||||
}
|
||||
}
|
||||
|
||||
ResourceLink link;
|
||||
Request & request;
|
||||
ResourceCost real_cost = 0;
|
||||
};
|
||||
|
||||
}
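Outside of tests the guard is used in its default locking mode with an estimated cost, and the real cost is reported on unlock, as the Azure and S3 buffers later in this patch do. A condensed sketch (illustrative only; `link`, `estimated_bytes` and `do_read()` are placeholders):

ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), link, estimated_bytes); // waits until admitted
size_t bytes_read = do_read();   // hypothetical I/O call
rlock.unlock(bytes_read);        // report the real cost; finish() adjusts the queue budget if it differs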
|
||||
|
@ -13,13 +13,28 @@ using ResourceCost = Int64;
struct ResourceLink
{
ISchedulerQueue * queue = nullptr;

bool operator==(const ResourceLink &) const = default;
explicit operator bool() const { return queue != nullptr; }

void adjust(ResourceCost estimated_cost, ResourceCost real_cost) const;
void reset()
{
queue = nullptr;
}
};

void consumed(ResourceCost cost) const;
/*
* Everything required for IO scheduling.
* Note that raw pointer are stored inside, so make sure that `ClassifierPtr` that produced
* resource links will outlive them. Usually classifier is stored in query `Context`.
*/
struct IOSchedulingSettings
{
ResourceLink read_resource_link;
ResourceLink write_resource_link;

void accumulate(ResourceCost cost) const;
bool operator==(const IOSchedulingSettings &) const = default;
explicit operator bool() const { return read_resource_link && write_resource_link; }
};

}
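ResourceLink keeps only a raw queue pointer, while the new IOSchedulingSettings bundles the read and write links that now travel inside ReadSettings/WriteSettings. A sketch of how they are typically populated (illustrative only; the resource names are hypothetical and the classifier comes from the query context, as DiskObjectStorage does further down in this patch):

DB::IOSchedulingSettings io;
if (auto query_context = DB::CurrentThread::getQueryContext())
{
    io.read_resource_link  = query_context->getWorkloadClassifier()->get("network_read");   // hypothetical resource name
    io.write_resource_link = query_context->getWorkloadClassifier()->get("network_write");  // hypothetical resource name
}
read_settings.io_scheduling = io;   // later consumed by ResourceGuard / CurrentThread::IOScope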
@ -45,7 +45,7 @@ constexpr ResourceCost ResourceCostMax = std::numeric_limits<int>::max();
|
||||
class ResourceRequest : public boost::intrusive::list_base_hook<>
|
||||
{
|
||||
public:
|
||||
/// Cost of request execution; should be filled before request enqueueing.
|
||||
/// Cost of request execution; should be filled before request enqueueing and remain constant until `finish()`.
|
||||
/// NOTE: If cost is not known in advance, ResourceBudget should be used (note that every ISchedulerQueue has it)
|
||||
ResourceCost cost;
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
@ -27,6 +28,25 @@ namespace CurrentMetrics
|
||||
extern const Metric GlobalThreadScheduled;
|
||||
}
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event GlobalThreadPoolExpansions;
|
||||
extern const Event GlobalThreadPoolShrinks;
|
||||
extern const Event GlobalThreadPoolThreadCreationMicroseconds;
|
||||
extern const Event GlobalThreadPoolLockWaitMicroseconds;
|
||||
extern const Event GlobalThreadPoolJobs;
|
||||
extern const Event GlobalThreadPoolJobWaitTimeMicroseconds;
|
||||
|
||||
extern const Event LocalThreadPoolExpansions;
|
||||
extern const Event LocalThreadPoolShrinks;
|
||||
extern const Event LocalThreadPoolThreadCreationMicroseconds;
|
||||
extern const Event LocalThreadPoolLockWaitMicroseconds;
|
||||
extern const Event LocalThreadPoolJobs;
|
||||
extern const Event LocalThreadPoolBusyMicroseconds;
|
||||
extern const Event LocalThreadPoolJobWaitTimeMicroseconds;
|
||||
|
||||
}
|
||||
|
||||
class JobWithPriority
|
||||
{
|
||||
public:
|
||||
@ -40,6 +60,7 @@ public:
|
||||
/// Call stacks of all jobs' schedulings leading to this one
|
||||
std::vector<StackTrace::FramePointers> frame_pointers;
|
||||
bool enable_job_stack_trace = false;
|
||||
Stopwatch job_create_time;
|
||||
|
||||
JobWithPriority(
|
||||
Job job_, Priority priority_, CurrentMetrics::Metric metric,
|
||||
@ -59,6 +80,13 @@ public:
|
||||
{
|
||||
return priority > rhs.priority; // Reversed for `priority_queue` max-heap to yield minimum value (i.e. highest priority) first
|
||||
}
|
||||
|
||||
UInt64 elapsedMicroseconds() const
|
||||
{
|
||||
return job_create_time.elapsedMicroseconds();
|
||||
}
|
||||
|
||||
|
||||
};
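The new job_create_time stopwatch is what feeds the *ThreadPoolJobWaitTimeMicroseconds counters: it starts when the job object is constructed at schedule time and is read by the worker right after the job is popped from the queue. Conceptually (illustrative only, not part of the patch):

Stopwatch job_create_time;                                    // started when the job is enqueued
/* ... the job waits in the priority queue ... */
UInt64 queue_wait = job_create_time.elapsedMicroseconds();    // read by the worker on dequeue
ProfileEvents::increment(ProfileEvents::GlobalThreadPoolJobWaitTimeMicroseconds, queue_wait);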
|
||||
|
||||
static constexpr auto DEFAULT_THREAD_NAME = "ThreadPool";
|
||||
@ -180,14 +208,18 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
};
|
||||
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
|
||||
if (CannotAllocateThreadFaultInjector::injectFault())
|
||||
return on_error("fault injected");
|
||||
|
||||
auto pred = [this] { return !queue_size || scheduled_jobs < queue_size || shutdown; };
|
||||
|
||||
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set and the value is zero.
|
||||
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set. Even if the value is zero.
|
||||
{
|
||||
if (!job_finished.wait_for(lock, std::chrono::microseconds(*wait_microseconds), pred))
|
||||
return on_error(fmt::format("no free thread (timeout={})", *wait_microseconds));
|
||||
@ -216,7 +248,13 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
|
||||
try
|
||||
{
|
||||
Stopwatch watch2;
|
||||
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
|
||||
watch2.elapsedMicroseconds());
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -239,6 +277,8 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
/// Wake up a free thread to run the new job.
|
||||
new_job_or_shutdown.notify_one();
|
||||
|
||||
ProfileEvents::increment(std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobs : ProfileEvents::LocalThreadPoolJobs);
|
||||
|
||||
return static_cast<ReturnType>(true);
|
||||
}
|
||||
|
||||
@ -262,7 +302,14 @@ void ThreadPoolImpl<Thread>::startNewThreadsNoLock()
|
||||
|
||||
try
|
||||
{
|
||||
Stopwatch watch;
|
||||
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
|
||||
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -293,7 +340,11 @@ void ThreadPoolImpl<Thread>::scheduleOrThrow(Job job, Priority priority, uint64_
|
||||
template <typename Thread>
|
||||
void ThreadPoolImpl<Thread>::wait()
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
/// Signal here just in case.
|
||||
/// If threads are waiting on condition variables, but there are some jobs in the queue
|
||||
/// then it will prevent us from deadlock.
|
||||
@ -334,7 +385,11 @@ void ThreadPoolImpl<Thread>::finalize()
|
||||
|
||||
/// Wait for all currently running jobs to finish (we don't wait for all scheduled jobs here like the function wait() does).
|
||||
for (auto & thread : threads)
|
||||
{
|
||||
thread.join();
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
|
||||
}
|
||||
|
||||
threads.clear();
|
||||
}
|
||||
@ -391,7 +446,11 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
std::optional<JobWithPriority> job_data;
|
||||
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
|
||||
// Finish with previous job if any
|
||||
if (job_is_done)
|
||||
@ -424,6 +483,8 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
{
|
||||
thread_it->detach();
|
||||
threads.erase(thread_it);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -433,6 +494,10 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
job_data = std::move(const_cast<JobWithPriority &>(jobs.top()));
|
||||
jobs.pop();
|
||||
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobWaitTimeMicroseconds : ProfileEvents::LocalThreadPoolJobWaitTimeMicroseconds,
|
||||
job_data->elapsedMicroseconds());
|
||||
|
||||
/// We don't run jobs after `shutdown` is set, but we have to properly dequeue all jobs and finish them.
|
||||
if (shutdown)
|
||||
{
|
||||
@ -459,7 +524,22 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
|
||||
CurrentMetrics::Increment metric_active_pool_threads(metric_active_threads);
|
||||
|
||||
job_data->job();
|
||||
if constexpr (!std::is_same_v<Thread, std::thread>)
|
||||
{
|
||||
Stopwatch watch;
|
||||
job_data->job();
|
||||
// This metric is less relevant for the global thread pool, as it would show large values (time while
|
||||
// a thread was used by local pools) and increment only when local pools are destroyed.
|
||||
//
|
||||
// In cases where global pool threads are used directly (without a local thread pool), distinguishing
|
||||
// them is difficult.
|
||||
ProfileEvents::increment(ProfileEvents::LocalThreadPoolBusyMicroseconds, watch.elapsedMicroseconds());
|
||||
}
|
||||
else
|
||||
{
|
||||
job_data->job();
|
||||
}
|
||||
|
||||
|
||||
if (thread_trace_context.root_span.isTraceEnabled())
|
||||
{
|
||||
|
@ -131,7 +131,7 @@ private:
|
||||
bool threads_remove_themselves = true;
|
||||
const bool shutdown_on_exception = true;
|
||||
|
||||
boost::heap::priority_queue<JobWithPriority> jobs;
|
||||
boost::heap::priority_queue<JobWithPriority,boost::heap::stable<true>> jobs;
|
||||
std::list<Thread> threads;
|
||||
std::exception_ptr first_exception;
|
||||
std::stack<OnDestroyCallback> on_destroy_callbacks;
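Switching the job queue to boost::heap::stable<true> makes dequeue order deterministic for jobs of equal priority: they now come out in insertion (FIFO) order rather than an unspecified one. A self-contained sketch of the behaviour (illustrative only, not part of the patch):

#include <boost/heap/priority_queue.hpp>

struct Item
{
    int priority;
    int seq;                                                        // insertion order, for illustration
    bool operator<(const Item & rhs) const { return priority < rhs.priority; }
};

boost::heap::priority_queue<Item, boost::heap::stable<true>> q;
// q.push({0, 1}); q.push({0, 2}); q.push({0, 3});
// With stable<true>, equal-priority items are popped as seq 1, 2, 3 (insertion order).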
|
||||
|
@ -7,11 +7,11 @@
|
||||
#include <Common/MemoryTracker.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <unordered_set>
|
||||
@ -188,6 +188,10 @@ public:
|
||||
Progress progress_in;
|
||||
Progress progress_out;
|
||||
|
||||
/// IO scheduling
|
||||
ResourceLink read_resource_link;
|
||||
ResourceLink write_resource_link;
|
||||
|
||||
private:
|
||||
/// Group of threads, to which this thread attached
|
||||
ThreadGroupPtr thread_group;
|
||||
|
src/Common/tests/gtest_config_host_validation.cpp (new file, 69 lines added)
@ -0,0 +1,69 @@
|
||||
#include <Poco/AutoPtr.h>
|
||||
#include <Poco/DOM/DOMParser.h>
|
||||
#include <Poco/Util/XMLConfiguration.h>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
TEST(Common, ConfigHostValidation)
|
||||
{
|
||||
std::string xml(R"CONFIG(<clickhouse>
|
||||
<IPv4_1>0.0.0.0</IPv4_1>
|
||||
<IPv4_2>192.168.0.1</IPv4_2>
|
||||
<IPv4_3>127.0.0.1</IPv4_3>
|
||||
<IPv4_4>255.255.255.255</IPv4_4>
|
||||
<IPv6_1>2001:0db8:85a3:0000:0000:8a2e:0370:7334</IPv6_1>
|
||||
<IPv6_2>2001:DB8::8a2e:370:7334</IPv6_2>
|
||||
<IPv6_3>::1</IPv6_3>
|
||||
<IPv6_4>::</IPv6_4>
|
||||
<Domain_1>www.example.com.</Domain_1>
|
||||
<Domain_2>a.co</Domain_2>
|
||||
<Domain_3>localhost</Domain_3>
|
||||
<Domain_4>xn--fiqs8s.xn--fiqz9s</Domain_4>
|
||||
<IPv4_Invalid_1>192.168.1.256</IPv4_Invalid_1>
|
||||
<IPv4_Invalid_2>192.168.1.1.1</IPv4_Invalid_2>
|
||||
<IPv4_Invalid_3>192.168.1.99999999999999999999</IPv4_Invalid_3>
|
||||
<IPv4_Invalid_4>192.168.1.a</IPv4_Invalid_4>
|
||||
<IPv6_Invalid_1>2001:0db8:85a3:::8a2e:0370:7334</IPv6_Invalid_1>
|
||||
<IPv6_Invalid_2>1200::AB00:1234::2552:7777:1313</IPv6_Invalid_2>
|
||||
<IPv6_Invalid_3>1200::AB00:1234:Q000:2552:7777:1313</IPv6_Invalid_3>
|
||||
<IPv6_Invalid_4>1200:AB00:1234:2552:7777:1313:FFFF</IPv6_Invalid_4>
|
||||
<Domain_Invalid_1>example.com..</Domain_Invalid_1>
|
||||
<Domain_Invalid_2>5example.com</Domain_Invalid_2>
|
||||
<Domain_Invalid_3>example.com-</Domain_Invalid_3>
|
||||
<Domain_Invalid_4>exa_mple.com</Domain_Invalid_4>
|
||||
</clickhouse>)CONFIG");
|
||||
|
||||
Poco::XML::DOMParser dom_parser;
|
||||
Poco::AutoPtr<Poco::XML::Document> document = dom_parser.parseString(xml);
|
||||
Poco::AutoPtr<Poco::Util::XMLConfiguration> config = new Poco::Util::XMLConfiguration(document);
|
||||
|
||||
EXPECT_NO_THROW(config->getHost("IPv4_1"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv4_2"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv4_3"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv4_4"));
|
||||
|
||||
EXPECT_NO_THROW(config->getHost("IPv6_1"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv6_2"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv6_3"));
|
||||
EXPECT_NO_THROW(config->getHost("IPv6_4"));
|
||||
|
||||
EXPECT_NO_THROW(config->getHost("Domain_1"));
|
||||
EXPECT_NO_THROW(config->getHost("Domain_2"));
|
||||
EXPECT_NO_THROW(config->getHost("Domain_3"));
|
||||
EXPECT_NO_THROW(config->getHost("Domain_4"));
|
||||
|
||||
EXPECT_THROW(config->getHost("IPv4_Invalid_1"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv4_Invalid_2"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv4_Invalid_3"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv4_Invalid_4"), Poco::SyntaxException);
|
||||
|
||||
EXPECT_THROW(config->getHost("IPv6_Invalid_1"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv6_Invalid_2"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv6_Invalid_3"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("IPv6_Invalid_4"), Poco::SyntaxException);
|
||||
|
||||
EXPECT_THROW(config->getHost("Domain_Invalid_1"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("Domain_Invalid_2"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("Domain_Invalid_3"), Poco::SyntaxException);
|
||||
EXPECT_THROW(config->getHost("Domain_Invalid_4"), Poco::SyntaxException);
|
||||
}
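The new test pins down what getHost() accepts and rejects. From an application's point of view the contract is simply that an invalid value surfaces as Poco::SyntaxException (illustrative sketch only; the key name is hypothetical):

try
{
    std::string host = config->getHost("listen_host");   // hypothetical configuration key
    // host is a syntactically valid IPv4/IPv6 address or domain name
}
catch (const Poco::SyntaxException &)
{
    // the configured value is not a valid host; fall back or report a configuration error
}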
|
@ -17,11 +17,12 @@
|
||||
|
||||
#include <Core/ExternalTable.h>
|
||||
#include <Core/Settings.h>
|
||||
#include <Poco/Net/MessageHeader.h>
|
||||
#include <Parsers/ASTNameTypePair.h>
|
||||
#include <Parsers/IdentifierQuotingStyle.h>
|
||||
#include <Parsers/ParserCreateQuery.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <base/scope_guard.h>
|
||||
#include <Poco/Net/MessageHeader.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -85,7 +86,15 @@ void BaseExternalTable::parseStructureFromStructureField(const std::string & arg
|
||||
/// We use `formatWithPossiblyHidingSensitiveData` instead of `getColumnNameWithoutAlias` because `column->type` is an ASTFunction.
|
||||
/// `getColumnNameWithoutAlias` will return name of the function with `(arguments)` even if arguments is empty.
|
||||
if (column)
|
||||
structure.emplace_back(column->name, column->type->formatWithPossiblyHidingSensitiveData(0, true, true, false));
|
||||
structure.emplace_back(
|
||||
column->name,
|
||||
column->type->formatWithPossiblyHidingSensitiveData(
|
||||
/*max_length=*/0,
|
||||
/*one_line=*/true,
|
||||
/*show_secrets=*/true,
|
||||
/*print_pretty_type_names=*/false,
|
||||
/*always_quote_identifiers=*/false,
|
||||
/*identifier_quoting_style=*/IdentifierQuotingStyle::Backticks));
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: expected column definition, got {}", child->formatForErrorMessage());
|
||||
}
|
||||
@ -102,7 +111,15 @@ void BaseExternalTable::parseStructureFromTypesField(const std::string & argumen
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: {}", error);
|
||||
|
||||
for (size_t i = 0; i < type_list_raw->children.size(); ++i)
|
||||
structure.emplace_back("_" + toString(i + 1), type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(0, true, true, false));
|
||||
structure.emplace_back(
|
||||
"_" + toString(i + 1),
|
||||
type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(
|
||||
/*max_length=*/0,
|
||||
/*one_line=*/true,
|
||||
/*show_secrets=*/true,
|
||||
/*print_pretty_type_names=*/false,
|
||||
/*always_quote_identifiers=*/false,
|
||||
/*identifier_quoting_style=*/IdentifierQuotingStyle::Backticks));
|
||||
}
|
||||
|
||||
void BaseExternalTable::initSampleBlock()
|
||||
|
@ -1296,6 +1296,9 @@ class IColumn;
|
||||
M(Bool, precise_float_parsing, false, "Prefer more precise (but slower) float parsing algorithm", 0) \
|
||||
M(DateTimeOverflowBehavior, date_time_overflow_behavior, "ignore", "Overflow mode for Date, Date32, DateTime, DateTime64 types. Possible values: 'ignore', 'throw', 'saturate'.", 0) \
|
||||
M(Bool, validate_experimental_and_suspicious_types_inside_nested_types, true, "Validate usage of experimental and suspicious types inside nested types like Array/Map/Tuple", 0) \
|
||||
\
|
||||
M(Bool, output_format_always_quote_identifiers, false, "Always quote identifiers", 0) \
|
||||
M(IdentifierQuotingStyle, output_format_identifier_quoting_style, IdentifierQuotingStyle::Backticks, "Set the quoting style for identifiers", 0) \
|
||||
|
||||
|
||||
// End of FORMAT_FACTORY_SETTINGS
|
||||
|
@ -75,6 +75,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
|
||||
{"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
|
||||
{"create_if_not_exists", false, false, "New setting."},
|
||||
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
|
||||
{"output_format_always_quote_identifiers", false, false, "New setting."},
|
||||
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."}
|
||||
}
|
||||
},
|
||||
{"24.8",
|
||||
|
@ -244,4 +244,10 @@ IMPLEMENT_SETTING_ENUM(
|
||||
GroupArrayActionWhenLimitReached,
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
{{"throw", GroupArrayActionWhenLimitReached::THROW}, {"discard", GroupArrayActionWhenLimitReached::DISCARD}})
|
||||
|
||||
IMPLEMENT_SETTING_ENUM(IdentifierQuotingStyle, ErrorCodes::BAD_ARGUMENTS,
|
||||
{{"None", IdentifierQuotingStyle::None},
|
||||
{"Backticks", IdentifierQuotingStyle::Backticks},
|
||||
{"DoubleQuotes", IdentifierQuotingStyle::DoubleQuotes},
|
||||
{"BackticksMySQL", IdentifierQuotingStyle::BackticksMySQL}})
|
||||
}
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <Formats/FormatSettings.h>
|
||||
#include <IO/ReadSettings.h>
|
||||
#include <Parsers/ASTSQLSecurity.h>
|
||||
#include <Parsers/IdentifierQuotingStyle.h>
|
||||
#include <QueryPipeline/SizeLimits.h>
|
||||
#include <Common/ShellCommandSettings.h>
|
||||
|
||||
@ -351,6 +352,8 @@ DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateT
|
||||
|
||||
DECLARE_SETTING_ENUM(SQLSecurityType)
|
||||
|
||||
DECLARE_SETTING_ENUM(IdentifierQuotingStyle)
|
||||
|
||||
enum class GroupArrayActionWhenLimitReached : uint8_t
|
||||
{
|
||||
THROW,
|
||||
|
@ -210,7 +210,7 @@ namespace
{
UInt64 stringToMaxThreads(const String & str)
{
if (startsWith(str, "auto"))
if (startsWith(str, "auto") || startsWith(str, "'auto"))
return 0;
return parseFromString<UInt64>(str);
}
@ -237,7 +237,8 @@ SettingFieldMaxThreads & SettingFieldMaxThreads::operator=(const Field & f)
String SettingFieldMaxThreads::toString() const
{
if (is_auto)
return "auto(" + ::DB::toString(value) + ")";
/// Removing quotes here will introduce an incompatibility between replicas with different versions.
return "'auto(" + ::DB::toString(value) + ")'";
else
return ::DB::toString(value);
}
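Taken together, the two changes above keep the textual form of max_threads = auto round-trippable across replica versions (worked example, not part of the patch; the resolved value of 16 is assumed):

// With max_threads left as auto and a resolved value of 16:
//   SettingFieldMaxThreads::toString()  ->  "'auto(16)'"   (quoted form, kept for replica compatibility)
//   stringToMaxThreads("'auto(16)'")    ->  0               (0 means "auto"; the unquoted "auto(16)" is still accepted)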
@ -519,10 +519,10 @@ static DataTypePtr createJSON(const ASTPtr & arguments)
|
||||
if (!context)
|
||||
context = Context::getGlobalContextInstance();
|
||||
|
||||
if (context->getSettingsRef().use_json_alias_for_old_object_type)
|
||||
if (context->getSettingsRef().allow_experimental_object_type && context->getSettingsRef().use_json_alias_for_old_object_type)
|
||||
{
|
||||
if (arguments && !arguments->children.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set setting allow_experimental_json_type = 1");
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set settings allow_experimental_json_type = 1 and use_json_alias_for_old_object_type = 0");
|
||||
|
||||
return std::make_shared<DataTypeObjectDeprecated>("JSON", false);
|
||||
}
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Throttler.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <base/sleep.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <IO/SeekableReadBuffer.h>
|
||||
@ -113,7 +114,9 @@ bool ReadBufferFromAzureBlobStorage::nextImpl()
|
||||
{
|
||||
try
|
||||
{
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, to_read_bytes);
|
||||
bytes_read = data_stream->ReadToCount(reinterpret_cast<uint8_t *>(data_ptr), to_read_bytes);
|
||||
rlock.unlock(bytes_read); // Do not hold resource under bandwidth throttler
|
||||
if (read_settings.remote_throttler)
|
||||
read_settings.remote_throttler->add(bytes_read, ProfileEvents::RemoteReadThrottlerBytes, ProfileEvents::RemoteReadThrottlerSleepMicroseconds);
|
||||
break;
|
||||
|
@ -101,15 +101,13 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
|
||||
{
|
||||
try
|
||||
{
|
||||
ResourceGuard rlock(write_settings.resource_link, cost); // Note that zero-cost requests are ignored
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIOWrite(), write_settings.io_scheduling.write_resource_link, cost); // Note that zero-cost requests are ignored
|
||||
func();
|
||||
rlock.unlock(cost);
|
||||
break;
|
||||
}
|
||||
catch (const Azure::Core::RequestFailedException & e)
|
||||
{
|
||||
if (cost)
|
||||
write_settings.resource_link.accumulate(cost); // Accumulate resource for later use, because we have failed to consume it
|
||||
|
||||
if (i == num_tries - 1 || !isRetryableAzureException(e))
|
||||
throw;
|
||||
|
||||
@ -117,8 +115,6 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (cost)
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
@ -461,14 +461,17 @@ DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
|
||||
}
|
||||
|
||||
template <class Settings>
|
||||
static inline Settings updateResourceLink(const Settings & settings, const String & resource_name)
|
||||
static inline Settings updateIOSchedulingSettings(const Settings & settings, const String & read_resource_name, const String & write_resource_name)
|
||||
{
|
||||
if (resource_name.empty())
|
||||
if (read_resource_name.empty() && write_resource_name.empty())
|
||||
return settings;
|
||||
if (auto query_context = CurrentThread::getQueryContext())
|
||||
{
|
||||
Settings result(settings);
|
||||
result.resource_link = query_context->getWorkloadClassifier()->get(resource_name);
|
||||
if (!read_resource_name.empty())
|
||||
result.io_scheduling.read_resource_link = query_context->getWorkloadClassifier()->get(read_resource_name);
|
||||
if (!write_resource_name.empty())
|
||||
result.io_scheduling.write_resource_link = query_context->getWorkloadClassifier()->get(write_resource_name);
|
||||
return result;
|
||||
}
|
||||
return settings;
|
||||
@ -500,7 +503,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
|
||||
|
||||
return object_storage->readObjects(
|
||||
storage_objects,
|
||||
updateResourceLink(settings, getReadResourceName()),
|
||||
updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName()),
|
||||
read_hint,
|
||||
file_size);
|
||||
}
|
||||
@ -513,7 +516,7 @@ std::unique_ptr<WriteBufferFromFileBase> DiskObjectStorage::writeFile(
|
||||
{
|
||||
LOG_TEST(log, "Write file: {}", path);
|
||||
|
||||
WriteSettings write_settings = updateResourceLink(settings, getWriteResourceName());
|
||||
WriteSettings write_settings = updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName());
|
||||
auto transaction = createObjectStorageTransaction();
|
||||
return transaction->writeFile(path, buf_size, mode, write_settings);
|
||||
}
|
||||
|
@ -406,7 +406,7 @@ void UserDefinedSQLObjectsZooKeeperStorage::syncObjects(const zkutil::ZooKeeperP
LOG_DEBUG(log, "Syncing user-defined {} objects", object_type);
Strings object_names = getObjectNamesAndSetWatch(zookeeper, object_type);

getLock();
auto lock = getLock();

/// Remove stale objects
removeAllObjectsExcept(object_names);
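The small-looking change above fixes a real locking bug: calling getLock() without binding the result creates a temporary guard that is destroyed at the end of the statement, so the cleanup ran without the mutex held. Minimal sketch of the difference (illustrative only, assuming getLock() returns a scoped lock such as std::lock_guard or std::unique_lock):

getLock();               // temporary guard, released immediately -> critical section is unprotected
auto lock = getLock();   // named guard, held until the end of the enclosing scope -> section is protected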
@ -2,6 +2,7 @@
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDate32.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeInterval.h>
|
||||
#include <Formats/FormatSettings.h>
|
||||
@ -43,6 +44,7 @@ public:
|
||||
enum ResultType
|
||||
{
|
||||
Date,
|
||||
Date32,
|
||||
DateTime,
|
||||
DateTime64,
|
||||
};
|
||||
@ -75,15 +77,15 @@ public:
|
||||
|
||||
bool second_argument_is_date = false;
|
||||
auto check_second_argument = [&] {
|
||||
if (!isDate(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
|
||||
if (!isDateOrDate32(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2nd argument of function {}. "
|
||||
"Should be a date or a date with time", arguments[1].type->getName(), getName());
|
||||
|
||||
second_argument_is_date = isDate(arguments[1].type);
|
||||
second_argument_is_date = isDateOrDate32(arguments[1].type);
|
||||
|
||||
if (second_argument_is_date && ((datepart_kind == IntervalKind::Kind::Hour)
|
||||
|| (datepart_kind == IntervalKind::Kind::Minute) || (datepart_kind == IntervalKind::Kind::Second)))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type Date of argument for function {}", getName());
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for function {}", arguments[1].type->getName(), getName());
|
||||
};
|
||||
|
||||
auto check_timezone_argument = [&] {
|
||||
@ -119,6 +121,8 @@ public:
|
||||
|
||||
if (result_type == ResultType::Date)
|
||||
return std::make_shared<DataTypeDate>();
|
||||
if (result_type == ResultType::Date32)
|
||||
return std::make_shared<DataTypeDate32>();
|
||||
else if (result_type == ResultType::DateTime)
|
||||
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 1, false));
|
||||
else
|
||||
|
@ -44,9 +44,9 @@ public:
|
||||
auto check_first_argument = [&]
|
||||
{
|
||||
const DataTypePtr & type_arg1 = arguments[0].type;
|
||||
if (!isDate(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
|
||||
if (!isDateOrDate32(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of 1st argument of function {}, expected a Date, DateTime or DateTime64",
|
||||
"Illegal type {} of 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64",
|
||||
type_arg1->getName(), getName());
|
||||
value_is_date = isDate(type_arg1);
|
||||
};
|
||||
@ -56,6 +56,7 @@ public:
|
||||
enum class ResultType : uint8_t
|
||||
{
|
||||
Date,
|
||||
Date32,
|
||||
DateTime,
|
||||
DateTime64
|
||||
};
|
||||
@ -128,6 +129,8 @@ public:
|
||||
{
|
||||
case ResultType::Date:
|
||||
return std::make_shared<DataTypeDate>();
|
||||
case ResultType::Date32:
|
||||
return std::make_shared<DataTypeDate32>();
|
||||
case ResultType::DateTime:
|
||||
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
|
||||
case ResultType::DateTime64:
|
||||
@ -185,7 +188,13 @@ private:
|
||||
if (time_column_vec)
|
||||
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
|
||||
}
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, DateTime or DateTime64", getName());
|
||||
else if (isDate32(time_column_type))
|
||||
{
|
||||
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
|
||||
if (time_column_vec)
|
||||
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
|
||||
}
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
|
||||
}
|
||||
|
||||
template <typename TimeDataType, typename TimeColumnType>
|
||||
|
@ -6,7 +6,6 @@
|
||||
|
||||
#include <IO/ReadBufferFromIStream.h>
|
||||
#include <IO/ReadBufferFromS3.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <IO/S3/getObjectInfo.h>
|
||||
#include <IO/S3/Requests.h>
|
||||
|
||||
@ -423,22 +422,13 @@ Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t attempt, si
|
||||
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::ReadBufferFromS3InitMicroseconds);
|
||||
|
||||
// We do not know in advance how many bytes we are going to consume, to avoid blocking estimated it from below
|
||||
constexpr ResourceCost estimated_cost = 1;
|
||||
ResourceGuard rlock(read_settings.resource_link, estimated_cost);
|
||||
|
||||
CurrentThread::IOScope io_scope(read_settings.io_scheduling);
|
||||
Aws::S3::Model::GetObjectOutcome outcome = client_ptr->GetObject(req);
|
||||
|
||||
rlock.unlock();
|
||||
|
||||
if (outcome.IsSuccess())
|
||||
{
|
||||
ResourceCost bytes_read = outcome.GetResult().GetContentLength();
|
||||
read_settings.resource_link.adjust(estimated_cost, bytes_read);
|
||||
return outcome.GetResultWithOwnership();
|
||||
}
|
||||
else
|
||||
{
|
||||
read_settings.resource_link.accumulate(estimated_cost);
|
||||
const auto & error = outcome.GetError();
|
||||
throw S3Exception(error.GetMessage(), error.GetErrorType());
|
||||
}
|
||||
|
@ -118,8 +118,7 @@ struct ReadSettings
|
||||
ThrottlerPtr remote_throttler;
|
||||
ThrottlerPtr local_throttler;
|
||||
|
||||
// Resource to be used during reading
|
||||
ResourceLink resource_link;
|
||||
IOSchedulingSettings io_scheduling;
|
||||
|
||||
size_t http_max_tries = 10;
|
||||
size_t http_retry_initial_backoff_ms = 100;
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <Common/Throttler.h>
|
||||
#include <Interpreters/Cache/FileCache.h>
|
||||
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/S3Common.h>
|
||||
#include <IO/S3/Requests.h>
|
||||
@ -558,12 +557,11 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
|
||||
|
||||
auto & request = std::get<0>(*worker_data);
|
||||
|
||||
ResourceCost cost = request.GetContentLength();
|
||||
ResourceGuard rlock(write_settings.resource_link, cost);
|
||||
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
|
||||
|
||||
Stopwatch watch;
|
||||
auto outcome = client_ptr->UploadPart(request);
|
||||
watch.stop();
|
||||
rlock.unlock(); // Avoid acquiring other locks under resource lock
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
|
||||
|
||||
@ -577,7 +575,6 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
|
||||
if (!outcome.IsSuccess())
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
|
||||
}
|
||||
|
||||
@ -715,12 +712,11 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
|
||||
if (client_ptr->isClientForDisk())
|
||||
ProfileEvents::increment(ProfileEvents::DiskS3PutObject);
|
||||
|
||||
ResourceCost cost = request.GetContentLength();
|
||||
ResourceGuard rlock(write_settings.resource_link, cost);
|
||||
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
|
||||
|
||||
Stopwatch watch;
|
||||
auto outcome = client_ptr->PutObject(request);
|
||||
watch.stop();
|
||||
rlock.unlock();
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
|
||||
if (blob_log)
|
||||
@ -734,7 +730,6 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
|
||||
}
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
|
||||
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
|
||||
{
|
||||
|
@ -13,8 +13,7 @@ struct WriteSettings
|
||||
ThrottlerPtr remote_throttler;
|
||||
ThrottlerPtr local_throttler;
|
||||
|
||||
// Resource to be used during reading
|
||||
ResourceLink resource_link;
|
||||
IOSchedulingSettings io_scheduling;
|
||||
|
||||
/// Filesystem cache settings
|
||||
bool enable_filesystem_cache_on_write_operations = false;
|
||||
|
@ -396,7 +396,7 @@ const ActionsDAG::Node * ActionsDAG::tryFindInOutputs(const std::string & name)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ActionsDAG::NodeRawConstPtrs ActionsDAG::findInOutpus(const Names & names) const
|
||||
ActionsDAG::NodeRawConstPtrs ActionsDAG::findInOutputs(const Names & names) const
|
||||
{
|
||||
NodeRawConstPtrs required_nodes;
|
||||
required_nodes.reserve(names.size());
|
||||
@ -524,7 +524,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_
|
||||
|
||||
void ActionsDAG::removeUnusedActions(const Names & required_names, bool allow_remove_inputs, bool allow_constant_folding)
|
||||
{
|
||||
auto required_nodes = findInOutpus(required_names);
|
||||
auto required_nodes = findInOutputs(required_names);
|
||||
outputs.swap(required_nodes);
|
||||
removeUnusedActions(allow_remove_inputs, allow_constant_folding);
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ public:
|
||||
const Node * tryFindInOutputs(const std::string & name) const;
|
||||
|
||||
/// Same, but for the list of names.
|
||||
NodeRawConstPtrs findInOutpus(const Names & names) const;
|
||||
NodeRawConstPtrs findInOutputs(const Names & names) const;
|
||||
|
||||
/// Find first node with the same name in output nodes and replace it.
|
||||
/// If was not found, add node to outputs end.
|
||||
@ -436,7 +436,7 @@ public:
|
||||
/// Returns a list of nodes representing atomic predicates.
|
||||
static NodeRawConstPtrs extractConjunctionAtoms(const Node * predicate);
|
||||
|
||||
/// Get a list of nodes. For every node, check if it can be compused using allowed subset of inputs.
|
||||
/// Get a list of nodes. For every node, check if it can be computed using allowed subset of inputs.
|
||||
/// Returns only those nodes from the list which can be computed.
|
||||
static NodeRawConstPtrs filterNodesByAllowedInputs(
|
||||
NodeRawConstPtrs nodes,
|
||||
|
@ -33,6 +33,8 @@
|
||||
#include <Common/SensitiveDataMasker.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
@ -308,6 +310,7 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
|
||||
/* no_squash */ false,
|
||||
/* no_destination */ false,
|
||||
/* async_insert */ false);
|
||||
|
||||
auto table = interpreter.getTable(insert_query);
|
||||
auto sample_block = InterpreterInsertQuery::getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);
|
||||
|
||||
@ -318,6 +321,10 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
|
||||
/// InterpreterInsertQuery::getTable() -> ITableFunction::execute().
|
||||
if (insert_query.table_id)
|
||||
query_context->checkAccess(AccessType::INSERT, insert_query.table_id, sample_block.getNames());
|
||||
|
||||
insert_query.columns = std::make_shared<ASTExpressionList>();
|
||||
for (const auto & column : sample_block)
|
||||
insert_query.columns->children.push_back(std::make_shared<ASTIdentifier>(column.name));
|
||||
}
|
||||
|
||||
AsynchronousInsertQueue::PushResult
|
||||
@ -696,6 +703,17 @@ catch (...)
|
||||
tryLogCurrentException("AsynchronousInsertQueue", "Failed to add elements to AsynchronousInsertLog");
|
||||
}
|
||||
|
||||
void convertBlockToHeader(Block & block, const Block & header)
|
||||
{
|
||||
auto converting_dag = ActionsDAG::makeConvertingActions(
|
||||
block.getColumnsWithTypeAndName(),
|
||||
header.getColumnsWithTypeAndName(),
|
||||
ActionsDAG::MatchColumnsMode::Name);
|
||||
|
||||
auto converting_actions = std::make_shared<ExpressionActions>(std::move(converting_dag));
|
||||
converting_actions->execute(block);
|
||||
}
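convertBlockToHeader is a small helper that builds a name-based converting ActionsDAG and applies it in place; it is used further down in this patch to align preprocessed async-insert blocks with the pipeline header. Usage sketch (mirrors the call site below):

Block block_to_insert = *block;
if (!isCompatibleHeader(block_to_insert, header))
    convertBlockToHeader(block_to_insert, header);   // reorder and cast columns by name to match `header`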
|
||||
|
||||
String serializeQuery(const IAST & query, size_t max_length)
|
||||
{
|
||||
return query.hasSecretParts()
|
||||
@ -791,6 +809,61 @@ try
|
||||
if (async_insert_log)
|
||||
log_elements.reserve(data->entries.size());
|
||||
|
||||
auto add_entry_to_asynchronous_insert_log = [&, query_by_format = NameToNameMap{}](
|
||||
const InsertData::EntryPtr & entry,
|
||||
const String & parsing_exception,
|
||||
size_t num_rows,
|
||||
size_t num_bytes) mutable
|
||||
{
|
||||
if (!async_insert_log)
|
||||
return;
|
||||
|
||||
AsynchronousInsertLogElement elem;
|
||||
elem.event_time = timeInSeconds(entry->create_time);
|
||||
elem.event_time_microseconds = timeInMicroseconds(entry->create_time);
|
||||
elem.database = query_database;
|
||||
elem.table = query_table;
|
||||
elem.format = entry->format;
|
||||
elem.query_id = entry->query_id;
|
||||
elem.bytes = num_bytes;
|
||||
elem.rows = num_rows;
|
||||
elem.exception = parsing_exception;
|
||||
elem.data_kind = entry->chunk.getDataKind();
|
||||
elem.timeout_milliseconds = data->timeout_ms.count();
|
||||
elem.flush_query_id = insert_query_id;
|
||||
|
||||
auto get_query_by_format = [&](const String & format) -> const String &
|
||||
{
|
||||
auto [it, inserted] = query_by_format.try_emplace(format);
|
||||
if (!inserted)
|
||||
return it->second;
|
||||
|
||||
auto query = key.query->clone();
|
||||
assert_cast<ASTInsertQuery &>(*query).format = format;
|
||||
it->second = serializeQuery(*query, insert_context->getSettingsRef().log_queries_cut_to_length);
|
||||
return it->second;
|
||||
};
|
||||
|
||||
if (entry->chunk.getDataKind() == DataKind::Parsed)
|
||||
elem.query_for_logging = key.query_str;
|
||||
else
|
||||
elem.query_for_logging = get_query_by_format(entry->format);
|
||||
|
||||
/// If there was a parsing error,
|
||||
/// the entry won't be flushed anyway,
|
||||
/// so add the log element immediately.
|
||||
if (!elem.exception.empty())
|
||||
{
|
||||
elem.status = AsynchronousInsertLogElement::ParsingError;
|
||||
async_insert_log->add(std::move(elem));
|
||||
}
|
||||
else
|
||||
{
|
||||
elem.status = AsynchronousInsertLogElement::Ok;
|
||||
log_elements.push_back(std::move(elem));
|
||||
}
|
||||
};
|
||||
|
||||
try
|
||||
{
|
||||
interpreter = std::make_unique<InterpreterInsertQuery>(
|
||||
@ -819,49 +892,20 @@ try
|
||||
catch (...)
|
||||
{
|
||||
logExceptionBeforeStart(query_for_logging, insert_context, key.query, query_span, start_watch.elapsedMilliseconds());
|
||||
|
||||
if (async_insert_log)
|
||||
{
|
||||
for (const auto & entry : data->entries)
|
||||
add_entry_to_asynchronous_insert_log(entry, /*parsing_exception=*/ "", /*num_rows=*/ 0, entry->chunk.byteSize());
|
||||
|
||||
auto exception = getCurrentExceptionMessage(false);
|
||||
auto flush_time = std::chrono::system_clock::now();
|
||||
appendElementsToLogSafe(*async_insert_log, std::move(log_elements), flush_time, exception);
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
auto add_entry_to_asynchronous_insert_log = [&](const auto & entry,
|
||||
const auto & entry_query_for_logging,
|
||||
const auto & exception,
|
||||
size_t num_rows,
|
||||
size_t num_bytes,
|
||||
Milliseconds timeout_ms)
|
||||
{
|
||||
if (!async_insert_log)
|
||||
return;
|
||||
|
||||
AsynchronousInsertLogElement elem;
|
||||
elem.event_time = timeInSeconds(entry->create_time);
|
||||
elem.event_time_microseconds = timeInMicroseconds(entry->create_time);
|
||||
elem.query_for_logging = entry_query_for_logging;
|
||||
elem.database = query_database;
|
||||
elem.table = query_table;
|
||||
elem.format = entry->format;
|
||||
elem.query_id = entry->query_id;
|
||||
elem.bytes = num_bytes;
|
||||
elem.rows = num_rows;
|
||||
elem.exception = exception;
|
||||
elem.data_kind = entry->chunk.getDataKind();
|
||||
elem.timeout_milliseconds = timeout_ms.count();
|
||||
elem.flush_query_id = insert_query_id;
|
||||
|
||||
/// If there was a parsing error,
|
||||
/// the entry won't be flushed anyway,
|
||||
/// so add the log element immediately.
|
||||
if (!elem.exception.empty())
|
||||
{
|
||||
elem.status = AsynchronousInsertLogElement::ParsingError;
|
||||
async_insert_log->add(std::move(elem));
|
||||
}
|
||||
else
|
||||
{
|
||||
log_elements.push_back(elem);
|
||||
}
|
||||
};
|
||||
|
||||
auto finish_entries = [&]
|
||||
auto finish_entries = [&](size_t num_rows, size_t num_bytes)
|
||||
{
|
||||
for (const auto & entry : data->entries)
|
||||
{
|
||||
@ -874,20 +918,7 @@ try
|
||||
auto flush_time = std::chrono::system_clock::now();
|
||||
appendElementsToLogSafe(*async_insert_log, std::move(log_elements), flush_time, "");
|
||||
}
|
||||
};
|
||||
|
||||
Chunk chunk;
|
||||
auto header = pipeline.getHeader();
|
||||
|
||||
if (key.data_kind == DataKind::Parsed)
|
||||
chunk = processEntriesWithParsing(key, data, header, insert_context, log, add_entry_to_asynchronous_insert_log);
|
||||
else
|
||||
chunk = processPreprocessedEntries(key, data, header, insert_context, add_entry_to_asynchronous_insert_log);
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::AsyncInsertRows, chunk.getNumRows());
|
||||
|
||||
auto log_and_add_finish_to_query_log = [&](size_t num_rows, size_t num_bytes)
|
||||
{
|
||||
LOG_DEBUG(log, "Flushed {} rows, {} bytes for query '{}'", num_rows, num_bytes, key.query_str);
|
||||
queue_shard_flush_time_history.updateWithCurrentTime();
|
||||
|
||||
@ -896,16 +927,24 @@ try
|
||||
query_log_elem, insert_context, key.query, pipeline, pulling_pipeline, query_span, QueryCache::Usage::None, internal);
|
||||
};
|
||||
|
||||
|
||||
if (chunk.getNumRows() == 0)
|
||||
{
|
||||
finish_entries();
|
||||
log_and_add_finish_to_query_log(0, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
Chunk chunk;
|
||||
auto header = pipeline.getHeader();
|
||||
|
||||
if (key.data_kind == DataKind::Parsed)
|
||||
chunk = processEntriesWithParsing(key, data, header, insert_context, log, add_entry_to_asynchronous_insert_log);
|
||||
else
|
||||
chunk = processPreprocessedEntries(data, header, add_entry_to_asynchronous_insert_log);
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::AsyncInsertRows, chunk.getNumRows());
|
||||
|
||||
if (chunk.getNumRows() == 0)
|
||||
{
|
||||
finish_entries(/*num_rows=*/ 0, /*num_bytes=*/ 0);
|
||||
return;
|
||||
}
|
||||
|
||||
size_t num_rows = chunk.getNumRows();
|
||||
size_t num_bytes = chunk.bytes();
|
||||
|
||||
@ -915,7 +954,7 @@ try
|
||||
CompletedPipelineExecutor completed_executor(pipeline);
|
||||
completed_executor.execute();
|
||||
|
||||
log_and_add_finish_to_query_log(num_rows, num_bytes);
|
||||
finish_entries(num_rows, num_bytes);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -929,8 +968,6 @@ try
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
finish_entries();
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
@ -991,7 +1028,6 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
|
||||
|
||||
StreamingFormatExecutor executor(header, format, std::move(on_error), std::move(adding_defaults_transform));
|
||||
auto chunk_info = std::make_shared<AsyncInsertInfo>();
|
||||
auto query_for_logging = serializeQuery(*key.query, insert_context->getSettingsRef().log_queries_cut_to_length);
|
||||
|
||||
for (const auto & entry : data->entries)
|
||||
{
|
||||
@ -1009,7 +1045,8 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
|
||||
size_t num_rows = executor.execute(*buffer);
|
||||
|
||||
total_rows += num_rows;
|
||||
/// for some reason, client can pass zero rows and bytes to server.
|
||||
|
||||
/// For some reason, client can pass zero rows and bytes to server.
|
||||
/// We don't update offsets in this case, because we assume every insert has some rows during dedup
|
||||
/// but we have nothing to deduplicate for this insert.
|
||||
if (num_rows > 0)
|
||||
@ -1018,8 +1055,7 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
|
||||
chunk_info->tokens.push_back(entry->async_dedup_token);
|
||||
}
|
||||
|
||||
add_to_async_insert_log(entry, query_for_logging, current_exception, num_rows, num_bytes, data->timeout_ms);
|
||||
|
||||
add_to_async_insert_log(entry, current_exception, num_rows, num_bytes);
|
||||
current_exception.clear();
|
||||
entry->resetChunk();
|
||||
}
|
||||
@ -1031,30 +1067,14 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
|
||||
|
||||
template <typename LogFunc>
|
||||
Chunk AsynchronousInsertQueue::processPreprocessedEntries(
|
||||
const InsertQuery & key,
|
||||
const InsertDataPtr & data,
|
||||
const Block & header,
|
||||
const ContextPtr & insert_context,
|
||||
LogFunc && add_to_async_insert_log)
|
||||
{
|
||||
size_t total_rows = 0;
|
||||
auto chunk_info = std::make_shared<AsyncInsertInfo>();
|
||||
auto result_columns = header.cloneEmptyColumns();
|
||||
|
||||
std::unordered_map<String, String> format_to_query;
|
||||
|
||||
auto get_query_by_format = [&](const String & format) -> const String &
|
||||
{
|
||||
auto [it, inserted] = format_to_query.try_emplace(format);
|
||||
if (!inserted)
|
||||
return it->second;
|
||||
|
||||
auto query = key.query->clone();
|
||||
assert_cast<ASTInsertQuery &>(*query).format = format;
|
||||
it->second = serializeQuery(*query, insert_context->getSettingsRef().log_queries_cut_to_length);
|
||||
return it->second;
|
||||
};
|
||||
|
||||
for (const auto & entry : data->entries)
|
||||
{
|
||||
const auto * block = entry->chunk.asBlock();
|
||||
@ -1062,23 +1082,26 @@ Chunk AsynchronousInsertQueue::processPreprocessedEntries(
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Expected entry with data kind Preprocessed. Got: {}", entry->chunk.getDataKind());
|
||||
|
||||
auto columns = block->getColumns();
|
||||
Block block_to_insert = *block;
|
||||
if (!isCompatibleHeader(block_to_insert, header))
|
||||
convertBlockToHeader(block_to_insert, header);
|
||||
|
||||
auto columns = block_to_insert.getColumns();
|
||||
for (size_t i = 0, s = columns.size(); i < s; ++i)
|
||||
result_columns[i]->insertRangeFrom(*columns[i], 0, columns[i]->size());
|
||||
|
||||
total_rows += block->rows();
|
||||
/// for some reason, client can pass zero rows and bytes to server.
|
||||
total_rows += block_to_insert.rows();
|
||||
|
||||
/// For some reason, client can pass zero rows and bytes to server.
|
||||
/// We don't update offsets in this case, because we assume every insert has some rows during dedup,
|
||||
/// but we have nothing to deduplicate for this insert.
|
||||
if (block->rows())
|
||||
if (block_to_insert.rows() > 0)
|
||||
{
|
||||
chunk_info->offsets.push_back(total_rows);
|
||||
chunk_info->tokens.push_back(entry->async_dedup_token);
|
||||
}
|
||||
|
||||
const auto & query_for_logging = get_query_by_format(entry->format);
|
||||
add_to_async_insert_log(entry, query_for_logging, "", block->rows(), block->bytes(), data->timeout_ms);
|
||||
|
||||
add_to_async_insert_log(entry, /*parsing_exception=*/ "", block_to_insert.rows(), block_to_insert.bytes());
|
||||
entry->resetChunk();
|
||||
}
|
||||
|
||||
|
@ -288,10 +288,8 @@ private:
|
||||
|
||||
template <typename LogFunc>
|
||||
static Chunk processPreprocessedEntries(
|
||||
const InsertQuery & key,
|
||||
const InsertDataPtr & data,
|
||||
const Block & header,
|
||||
const ContextPtr & insert_context,
|
||||
LogFunc && add_to_async_insert_log);
|
||||
|
||||
template <typename E>
|
||||
|
@ -821,6 +821,19 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
|
||||
{
|
||||
properties.indices = as_storage_metadata->getSecondaryIndices();
|
||||
properties.projections = as_storage_metadata->getProjections().clone();
|
||||
|
||||
/// CREATE TABLE AS should copy PRIMARY KEY, ORDER BY, and similar clauses.
|
||||
if (!create.storage->primary_key && as_storage_metadata->isPrimaryKeyDefined() && as_storage_metadata->hasPrimaryKey())
|
||||
create.storage->set(create.storage->primary_key, as_storage_metadata->getPrimaryKeyAST()->clone());
|
||||
|
||||
if (!create.storage->partition_by && as_storage_metadata->isPartitionKeyDefined() && as_storage_metadata->hasPartitionKey())
|
||||
create.storage->set(create.storage->partition_by, as_storage_metadata->getPartitionKeyAST()->clone());
|
||||
|
||||
if (!create.storage->order_by && as_storage_metadata->isSortingKeyDefined() && as_storage_metadata->hasSortingKey())
|
||||
create.storage->set(create.storage->order_by, as_storage_metadata->getSortingKeyAST()->clone());
|
||||
|
||||
if (!create.storage->sample_by && as_storage_metadata->isSamplingKeyDefined() && as_storage_metadata->hasSamplingKey())
|
||||
create.storage->set(create.storage->sample_by, as_storage_metadata->getSamplingKeyAST()->clone());
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -30,6 +30,8 @@ ColumnsDescription ProcessorProfileLogElement::getColumnsDescription()
|
||||
{"id", std::make_shared<DataTypeUInt64>(), "ID of processor."},
|
||||
{"parent_ids", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "Parent processors IDs."},
|
||||
{"plan_step", std::make_shared<DataTypeUInt64>(), "ID of the query plan step which created this processor. The value is zero if the processor was not added from any step."},
|
||||
{"plan_step_name", std::make_shared<DataTypeString>(), "Name of the query plan step which created this processor. The value is empty if the processor was not added from any step."},
|
||||
{"plan_step_description", std::make_shared<DataTypeString>(), "Description of the query plan step which created this processor. The value is empty if the processor was not added from any step."},
|
||||
{"plan_group", std::make_shared<DataTypeUInt64>(), "Group of the processor if it was created by query plan step. A group is a logical partitioning of processors added from the same query plan step. Group is used only for beautifying the result of EXPLAIN PIPELINE result."},
|
||||
|
||||
{"initial_query_id", std::make_shared<DataTypeString>(), "ID of the initial query (for distributed query execution)."},
|
||||
@ -64,6 +66,8 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const
|
||||
}
|
||||
|
||||
columns[i++]->insert(plan_step);
|
||||
columns[i++]->insert(plan_step_name);
|
||||
columns[i++]->insert(plan_step_description);
|
||||
columns[i++]->insert(plan_group);
|
||||
columns[i++]->insertData(initial_query_id.data(), initial_query_id.size());
|
||||
columns[i++]->insertData(query_id.data(), query_id.size());
|
||||
|
@ -19,6 +19,8 @@ struct ProcessorProfileLogElement
|
||||
|
||||
UInt64 plan_step{};
|
||||
UInt64 plan_group{};
|
||||
String plan_step_name;
|
||||
String plan_step_description;
|
||||
|
||||
String initial_query_id;
|
||||
String query_id;
|
||||
|
@ -478,6 +478,8 @@ void logQueryFinish(
|
||||
processor_elem.parent_ids = std::move(parents);
|
||||
|
||||
processor_elem.plan_step = reinterpret_cast<std::uintptr_t>(processor->getQueryPlanStep());
|
||||
processor_elem.plan_step_name = processor->getPlanStepName();
|
||||
processor_elem.plan_step_description = processor->getPlanStepDescription();
|
||||
processor_elem.plan_group = processor->getQueryPlanStepGroup();
|
||||
|
||||
processor_elem.processor_name = processor->getName();
|
||||
@ -793,7 +795,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
||||
/// Verify that AST formatting is consistent:
|
||||
/// If you format AST, parse it back, and format it again, you get the same string.
|
||||
|
||||
String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true, false);
|
||||
String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true, false, false, IdentifierQuotingStyle::Backticks);
|
||||
|
||||
/// The query can become more verbose after formatting, so:
|
||||
size_t new_max_query_size = max_query_size > 0 ? (1000 + 2 * max_query_size) : 0;
|
||||
@ -818,7 +820,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
||||
|
||||
chassert(ast2);
|
||||
|
||||
String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true, false);
|
||||
String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true, false, false, IdentifierQuotingStyle::Backticks);
|
||||
|
||||
if (formatted1 != formatted2)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
|
@ -26,7 +26,12 @@ inline String format(const SecretHidingFormatSettings & settings)
|
||||
&& settings.ctx->getAccess()->isGranted(AccessType::displaySecretsInShowAndSelect);
|
||||
|
||||
return settings.query.formatWithPossiblyHidingSensitiveData(
|
||||
settings.max_length, settings.one_line, show_secrets, settings.ctx->getSettingsRef().print_pretty_type_names);
|
||||
settings.max_length,
|
||||
settings.one_line,
|
||||
show_secrets,
|
||||
settings.ctx->getSettingsRef().print_pretty_type_names,
|
||||
settings.ctx->getSettingsRef().output_format_always_quote_identifiers,
|
||||
settings.ctx->getSettingsRef().output_format_identifier_quoting_style);
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -66,8 +66,8 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo
{
    frame.need_parens = false;

    /// We have to always backquote column names to avoid ambiguity with INDEX and other declarations in CREATE query.
    format_settings.ostr << backQuote(name);
    /// We have to always quote column names to avoid ambiguity with INDEX and other declarations in CREATE query.
    format_settings.quoteIdentifier(name);

    if (type)
    {

@@ -35,7 +35,7 @@ void ASTDictionaryAttributeDeclaration::formatImpl(const FormatSettings & settin
{
    frame.need_parens = false;

    settings.ostr << backQuote(name);
    settings.quoteIdentifier(name);

    if (type)
    {
@ -79,7 +79,7 @@ void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & sta
|
||||
}
|
||||
else
|
||||
{
|
||||
s.ostr << backQuoteIfNeed(name);
|
||||
s.writeIdentifier(name);
|
||||
s.ostr << " ";
|
||||
expr->formatImpl(s, state, frame);
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ ASTPtr ASTProjectionDeclaration::clone() const
|
||||
|
||||
void ASTProjectionDeclaration::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
|
||||
{
|
||||
settings.ostr << backQuoteIfNeed(name);
|
||||
settings.writeIdentifier(name);
|
||||
std::string indent_str = settings.one_line ? "" : std::string(4u * frame.indent, ' ');
|
||||
std::string nl_or_nothing = settings.one_line ? "" : "\n";
|
||||
settings.ostr << settings.nl_or_ws << indent_str << "(" << nl_or_nothing;
|
||||
|
@ -22,10 +22,8 @@ ASTPtr ASTTableOverride::clone() const
|
||||
return res;
|
||||
}
|
||||
|
||||
void ASTTableOverride::formatImpl(const FormatSettings & settings_, FormatState & state, FormatStateStacked frame) const
|
||||
void ASTTableOverride::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
|
||||
{
|
||||
FormatSettings settings = settings_;
|
||||
settings.always_quote_identifiers = true;
|
||||
String nl_or_nothing = settings.one_line ? "" : "\n";
|
||||
String nl_or_ws = settings.one_line ? " " : "\n";
|
||||
String hl_keyword = settings.hilite ? hilite_keyword : "";
|
||||
|
@ -165,12 +165,21 @@ size_t IAST::checkDepthImpl(size_t max_depth) const
|
||||
return res;
|
||||
}
|
||||
|
||||
String IAST::formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const
|
||||
String IAST::formatWithPossiblyHidingSensitiveData(
|
||||
size_t max_length,
|
||||
bool one_line,
|
||||
bool show_secrets,
|
||||
bool print_pretty_type_names,
|
||||
bool always_quote_identifiers,
|
||||
IdentifierQuotingStyle identifier_quoting_style) const
|
||||
{
|
||||
|
||||
WriteBufferFromOwnString buf;
|
||||
FormatSettings settings(buf, one_line);
|
||||
settings.show_secrets = show_secrets;
|
||||
settings.print_pretty_type_names = print_pretty_type_names;
|
||||
settings.always_quote_identifiers = always_quote_identifiers;
|
||||
settings.identifier_quoting_style = identifier_quoting_style;
|
||||
format(settings);
|
||||
return wipeSensitiveDataAndCutToLength(buf.str(), max_length);
|
||||
}
|
||||
@ -248,6 +257,34 @@ void IAST::FormatSettings::writeIdentifier(const String & name) const
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void IAST::FormatSettings::quoteIdentifier(const String & name) const
|
||||
{
|
||||
switch (identifier_quoting_style)
|
||||
{
|
||||
case IdentifierQuotingStyle::None:
|
||||
{
|
||||
writeBackQuotedString(name, ostr);
|
||||
break;
|
||||
}
|
||||
case IdentifierQuotingStyle::Backticks:
|
||||
{
|
||||
writeBackQuotedString(name, ostr);
|
||||
break;
|
||||
}
|
||||
case IdentifierQuotingStyle::DoubleQuotes:
|
||||
{
|
||||
writeDoubleQuotedString(name, ostr);
|
||||
break;
|
||||
}
|
||||
case IdentifierQuotingStyle::BackticksMySQL:
|
||||
{
|
||||
writeBackQuotedStringMySQL(name, ostr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void IAST::dumpTree(WriteBuffer & ostr, size_t indent) const
|
||||
{
|
||||
String indent_str(indent, '-');
|
||||
|
@ -238,6 +238,9 @@ public:
|
||||
}
|
||||
|
||||
void writeIdentifier(const String & name) const;
|
||||
// Quote identifier `name` even when `always_quote_identifiers` is false.
|
||||
// If `identifier_quoting_style` is `IdentifierQuotingStyle::None`, quote it with `IdentifierQuotingStyle::Backticks`
|
||||
void quoteIdentifier(const String & name) const;
|
||||
};
|
||||
|
||||
/// State. For example, a set of nodes can be remembered, which we already walk through.
|
||||
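For illustration only, a minimal sketch of how a formatter might choose between the two helpers declared above; formatName and must_quote are invented names, while writeIdentifier/quoteIdentifier and their semantics come from this diff:

void formatName(const IAST::FormatSettings & settings, const String & name, bool must_quote)
{
    if (must_quote)
        settings.quoteIdentifier(name);    /// always quoted; falls back to backticks when the style is None
    else
        settings.writeIdentifier(name);    /// quoted only when always_quote_identifiers is set or the name requires it
}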
@ -278,7 +281,13 @@ public:
|
||||
|
||||
/// Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied.
|
||||
/// You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience.
|
||||
String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const;
|
||||
String formatWithPossiblyHidingSensitiveData(
|
||||
size_t max_length,
|
||||
bool one_line,
|
||||
bool show_secrets,
|
||||
bool print_pretty_type_names,
|
||||
bool always_quote_identifiers,
|
||||
IdentifierQuotingStyle identifier_quoting_style) const;
|
||||
|
||||
/** formatForLogging and formatForErrorMessage always hide secrets. This inconsistent
|
||||
* behaviour is due to the fact such functions are called from Client which knows nothing about
|
||||
@ -287,12 +296,12 @@ public:
|
||||
*/
|
||||
String formatForLogging(size_t max_length = 0) const
|
||||
{
|
||||
return formatWithPossiblyHidingSensitiveData(max_length, true, false, false);
|
||||
return formatWithPossiblyHidingSensitiveData(max_length, true, false, false, false, IdentifierQuotingStyle::Backticks);
|
||||
}
|
||||
|
||||
String formatForErrorMessage() const
|
||||
{
|
||||
return formatWithPossiblyHidingSensitiveData(0, true, false, false);
|
||||
return formatWithPossiblyHidingSensitiveData(0, true, false, false, false, IdentifierQuotingStyle::Backticks);
|
||||
}
|
||||
|
||||
virtual bool hasSecretParts() const { return childrenHaveSecretParts(); }
|
||||
|
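A hedged example of a caller passing the two new arguments of formatWithPossiblyHidingSensitiveData explicitly; the wrapper name is made up, only the signature declared above is taken from the diff:

String formatForAuditLog(const IAST & ast)
{
    return ast.formatWithPossiblyHidingSensitiveData(
        /*max_length=*/ 0,
        /*one_line=*/ true,
        /*show_secrets=*/ false,
        /*print_pretty_type_names=*/ false,
        /*always_quote_identifiers=*/ true,
        IdentifierQuotingStyle::DoubleQuotes);
}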
@ -231,47 +231,47 @@ INSTANTIATE_TEST_SUITE_P(ParserCreateDatabaseQuery, ParserTest,
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE=MaterializeMySQL('addr:port', 'db', 'user', 'pw') TABLE OVERRIDE `tbl`\n(PARTITION BY toYYYYMM(created))",
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('addr:port', 'db', 'user', 'pw')\nTABLE OVERRIDE `tbl`\n(\n PARTITION BY toYYYYMM(`created`)\n)"
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('addr:port', 'db', 'user', 'pw')\nTABLE OVERRIDE tbl\n(\n PARTITION BY toYYYYMM(created)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE=Foo TABLE OVERRIDE `tbl` (), TABLE OVERRIDE a (COLUMNS (_created DateTime MATERIALIZED now())), TABLE OVERRIDE b (PARTITION BY rand())",
|
||||
"CREATE DATABASE db\nENGINE = Foo\nTABLE OVERRIDE `tbl`\n(\n\n),\nTABLE OVERRIDE `a`\n(\n COLUMNS\n (\n `_created` DateTime MATERIALIZED now()\n )\n),\nTABLE OVERRIDE `b`\n(\n PARTITION BY rand()\n)"
|
||||
"CREATE DATABASE db\nENGINE = Foo\nTABLE OVERRIDE tbl\n(\n\n),\nTABLE OVERRIDE a\n(\n COLUMNS\n (\n `_created` DateTime MATERIALIZED now()\n )\n),\nTABLE OVERRIDE b\n(\n PARTITION BY rand()\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE=MaterializeMySQL('addr:port', 'db', 'user', 'pw') TABLE OVERRIDE tbl (COLUMNS (id UUID) PARTITION BY toYYYYMM(created))",
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('addr:port', 'db', 'user', 'pw')\nTABLE OVERRIDE `tbl`\n(\n COLUMNS\n (\n `id` UUID\n )\n PARTITION BY toYYYYMM(`created`)\n)"
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('addr:port', 'db', 'user', 'pw')\nTABLE OVERRIDE tbl\n(\n COLUMNS\n (\n `id` UUID\n )\n PARTITION BY toYYYYMM(created)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db TABLE OVERRIDE tbl (COLUMNS (INDEX foo foo TYPE minmax GRANULARITY 1) PARTITION BY if(_staged = 1, 'staging', toYYYYMM(created)))",
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE `tbl`\n(\n COLUMNS\n (\n INDEX foo `foo` TYPE minmax GRANULARITY 1\n )\n PARTITION BY if(`_staged` = 1, 'staging', toYYYYMM(`created`))\n)"
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE tbl\n(\n COLUMNS\n (\n INDEX foo foo TYPE minmax GRANULARITY 1\n )\n PARTITION BY if(_staged = 1, 'staging', toYYYYMM(created))\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db TABLE OVERRIDE t1 (TTL inserted + INTERVAL 1 MONTH DELETE), TABLE OVERRIDE t2 (TTL `inserted` + INTERVAL 2 MONTH DELETE)",
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE `t1`\n(\n TTL `inserted` + toIntervalMonth(1)\n),\nTABLE OVERRIDE `t2`\n(\n TTL `inserted` + toIntervalMonth(2)\n)"
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE t1\n(\n TTL inserted + toIntervalMonth(1)\n),\nTABLE OVERRIDE t2\n(\n TTL inserted + toIntervalMonth(2)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE = MaterializeMySQL('127.0.0.1:3306', 'db', 'root', 'pw') SETTINGS allows_query_when_mysql_lost = 1 TABLE OVERRIDE tab3 (COLUMNS (_staged UInt8 MATERIALIZED 1) PARTITION BY (c3) TTL c3 + INTERVAL 10 minute), TABLE OVERRIDE tab5 (PARTITION BY (c3) TTL c3 + INTERVAL 10 minute)",
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('127.0.0.1:3306', 'db', 'root', 'pw')\nSETTINGS allows_query_when_mysql_lost = 1\nTABLE OVERRIDE `tab3`\n(\n COLUMNS\n (\n `_staged` UInt8 MATERIALIZED 1\n )\n PARTITION BY `c3`\n TTL `c3` + toIntervalMinute(10)\n),\nTABLE OVERRIDE `tab5`\n(\n PARTITION BY `c3`\n TTL `c3` + toIntervalMinute(10)\n)"
|
||||
"CREATE DATABASE db\nENGINE = MaterializeMySQL('127.0.0.1:3306', 'db', 'root', 'pw')\nSETTINGS allows_query_when_mysql_lost = 1\nTABLE OVERRIDE tab3\n(\n COLUMNS\n (\n `_staged` UInt8 MATERIALIZED 1\n )\n PARTITION BY c3\n TTL c3 + toIntervalMinute(10)\n),\nTABLE OVERRIDE tab5\n(\n PARTITION BY c3\n TTL c3 + toIntervalMinute(10)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db TABLE OVERRIDE tbl (PARTITION BY toYYYYMM(created) COLUMNS (created DateTime CODEC(Delta)))",
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE `tbl`\n(\n COLUMNS\n (\n `created` DateTime CODEC(Delta)\n )\n PARTITION BY toYYYYMM(`created`)\n)"
|
||||
"CREATE DATABASE db\nTABLE OVERRIDE tbl\n(\n COLUMNS\n (\n `created` DateTime CODEC(Delta)\n )\n PARTITION BY toYYYYMM(created)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1",
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1",
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1, b = 2",
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1, b = 2",
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1, b = 2 TABLE OVERRIDE a (ORDER BY (id, version))",
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2\nTABLE OVERRIDE `a`\n(\n ORDER BY (`id`, `version`)\n)"
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2\nTABLE OVERRIDE a\n(\n ORDER BY (id, version)\n)"
|
||||
},
|
||||
{
|
||||
"CREATE DATABASE db ENGINE = Foo() SETTINGS a = 1, b = 2 COMMENT 'db comment' TABLE OVERRIDE a (ORDER BY (id, version))",
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2\nTABLE OVERRIDE `a`\n(\n ORDER BY (`id`, `version`)\n)\nCOMMENT 'db comment'"
|
||||
"CREATE DATABASE db\nENGINE = Foo\nSETTINGS a = 1, b = 2\nTABLE OVERRIDE a\n(\n ORDER BY (id, version)\n)\nCOMMENT 'db comment'"
|
||||
}
|
||||
})));
|
||||
|
||||
|
@ -494,6 +494,12 @@ JoinClausesAndActions buildJoinClausesAndActions(
|
||||
necessary_names.push_back(name);
|
||||
};
|
||||
|
||||
bool is_join_with_special_storage = false;
|
||||
if (const auto * right_table_node = join_node.getRightTableExpression()->as<TableNode>())
|
||||
{
|
||||
is_join_with_special_storage = dynamic_cast<const StorageJoin *>(right_table_node->getStorage().get());
|
||||
}
|
||||
|
||||
for (auto & join_clause : result.join_clauses)
|
||||
{
|
||||
const auto & left_filter_condition_nodes = join_clause.getLeftFilterConditionNodes();
|
||||
@ -561,7 +567,7 @@ JoinClausesAndActions buildJoinClausesAndActions(
|
||||
if (!left_key_node->result_type->equals(*common_type))
|
||||
left_key_node = &left_join_actions.addCast(*left_key_node, common_type, {});
|
||||
|
||||
if (!right_key_node->result_type->equals(*common_type))
|
||||
if (!is_join_with_special_storage && !right_key_node->result_type->equals(*common_type))
|
||||
right_key_node = &right_join_actions.addCast(*right_key_node, common_type, {});
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <iostream>
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <Processors/QueryPlan/IQueryPlanStep.h>
|
||||
|
||||
#include <Common/logger_useful.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
@@ -9,6 +10,17 @@
namespace DB
{

void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group)
{
    query_plan_step = step;
    query_plan_step_group = group;
    if (step)
    {
        plan_step_name = step->getName();
        plan_step_description = step->getStepDescription();
    }
}

void IProcessor::cancel() noexcept
{

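A sketch of the kind of call site this caching targets: when a plan step materializes its processors it attaches itself, so getPlanStepName()/getPlanStepDescription() can later return the cached strings without dereferencing the step again. The helper name is illustrative; setQueryPlanStep is the function from the diff.

void attachQueryPlanStep(IQueryPlanStep & step, Processors & processors, size_t group)
{
    for (auto & processor : processors)
        processor->setQueryPlanStep(&step, group);    /// copies name/description into the processor
}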
@ -311,14 +311,12 @@ public:
|
||||
constexpr static size_t NO_STREAM = std::numeric_limits<size_t>::max();
|
||||
|
||||
/// Step of QueryPlan from which processor was created.
|
||||
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0)
|
||||
{
|
||||
query_plan_step = step;
|
||||
query_plan_step_group = group;
|
||||
}
|
||||
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0);
|
||||
|
||||
IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; }
|
||||
size_t getQueryPlanStepGroup() const { return query_plan_step_group; }
|
||||
const String & getPlanStepName() const { return plan_step_name; }
|
||||
const String & getPlanStepDescription() const { return plan_step_description; }
|
||||
|
||||
uint64_t getElapsedNs() const { return elapsed_ns; }
|
||||
uint64_t getInputWaitElapsedNs() const { return input_wait_elapsed_ns; }
|
||||
@ -410,6 +408,8 @@ private:
|
||||
|
||||
IQueryPlanStep * query_plan_step = nullptr;
|
||||
size_t query_plan_step_group = 0;
|
||||
String plan_step_name;
|
||||
String plan_step_description;
|
||||
};
|
||||
|
||||
|
||||
|
@ -50,6 +50,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
|
||||
expr_node.step = std::make_unique<ExpressionStep>(
|
||||
expr_node.children.front()->step->getOutputStream(),
|
||||
expression->getExpression().clone());
|
||||
expr_node.step->setStepDescription(expression->getStepDescription());
|
||||
}
|
||||
|
||||
/// - Expression - Something
|
||||
|
@ -155,7 +155,7 @@ bool isPartitionKeySuitsGroupByKey(
|
||||
return false;
|
||||
|
||||
/// We are interested only in calculations required to obtain group by keys (and not aggregate function arguments for example).
|
||||
auto key_nodes = group_by_actions.findInOutpus(aggregating.getParams().keys);
|
||||
auto key_nodes = group_by_actions.findInOutputs(aggregating.getParams().keys);
|
||||
auto group_by_key_actions = ActionsDAG::cloneSubDAG(key_nodes, /*remove_aliases=*/ true);
|
||||
|
||||
const auto & gb_key_required_columns = group_by_key_actions.getRequiredColumnsNames();
|
||||
|
@ -83,7 +83,11 @@ void WriteBufferFromHTTPServerResponse::finishSendHeaders()
|
||||
return;
|
||||
|
||||
if (!headers_started_sending)
|
||||
{
|
||||
if (compression_method != CompressionMethod::None)
|
||||
response.set("Content-Encoding", toContentEncodingName(compression_method));
|
||||
startSendHeaders();
|
||||
}
|
||||
|
||||
writeHeaderSummary();
|
||||
writeExceptionCode();
|
||||
@@ -105,7 +109,13 @@ void WriteBufferFromHTTPServerResponse::nextImpl()
        initialized = true;

        if (compression_method != CompressionMethod::None)
            response.set("Content-Encoding", toContentEncodingName(compression_method));
        {
            /// If we've already sent headers, just send the `Content-Encoding` down the socket directly
            if (headers_started_sending)
                socketSendStr("Content-Encoding: " + toContentEncodingName(compression_method) + "\r\n");
            else
                response.set("Content-Encoding", toContentEncodingName(compression_method));
        }

        startSendHeaders();
        finishSendHeaders();
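The same decision in isolation, as a hedged sketch: the sendRaw callback and headers_sent flag stand in for socketSendStr() and headers_started_sending, and the function itself is not part of the codebase.

void setHeaderPossiblyLate(
    Poco::Net::HTTPServerResponse & response,
    bool headers_sent,
    const std::function<void(const std::string &)> & sendRaw,
    const std::string & name,
    const std::string & value)
{
    if (headers_sent)
        sendRaw(name + ": " + value + "\r\n");    /// too late for response.set(), write the header bytes directly
    else
        response.set(name, value);
}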
@ -177,8 +187,12 @@ void WriteBufferFromHTTPServerResponse::finalizeImpl()
|
||||
/// If no body data just send header
|
||||
startSendHeaders();
|
||||
|
||||
/// `finalizeImpl` must be idempotent, so set `initialized` here to not send stuff twice
|
||||
if (!initialized && offset() && compression_method != CompressionMethod::None)
|
||||
{
|
||||
initialized = true;
|
||||
socketSendStr("Content-Encoding: " + toContentEncodingName(compression_method) + "\r\n");
|
||||
}
|
||||
|
||||
finishSendHeaders();
|
||||
}
|
||||
|
@ -1245,6 +1245,13 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
|
||||
{
|
||||
auto columns = metadata.columns;
|
||||
|
||||
auto ast_to_str = [](const ASTPtr & query) -> String
|
||||
{
|
||||
if (!query)
|
||||
return "";
|
||||
return queryToString(query);
|
||||
};
|
||||
|
||||
for (size_t i = 0; i < size(); ++i)
|
||||
{
|
||||
auto & command = (*this)[i];
|
||||
@ -1277,6 +1284,11 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
|
||||
if (!has_column && command.if_exists)
|
||||
command.ignore = true;
|
||||
}
|
||||
else if (command.type == AlterCommand::MODIFY_ORDER_BY)
|
||||
{
|
||||
if (ast_to_str(command.order_by) == ast_to_str(metadata.sorting_key.definition_ast))
|
||||
command.ignore = true;
|
||||
}
|
||||
}
|
||||
|
||||
prepared = true;
|
||||
|
@ -119,27 +119,16 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
|
||||
return false;
|
||||
}
|
||||
|
||||
ResourceGuard rlock(read_settings.resource_link, num_bytes_to_read);
|
||||
int bytes_read;
|
||||
try
|
||||
{
|
||||
bytes_read = hdfsRead(fs.get(), fin, internal_buffer.begin(), safe_cast<int>(num_bytes_to_read));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
read_settings.resource_link.accumulate(num_bytes_to_read); // We assume no resource was used in case of failure
|
||||
throw;
|
||||
}
|
||||
rlock.unlock();
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, num_bytes_to_read);
|
||||
int bytes_read = hdfsRead(fs.get(), fin, internal_buffer.begin(), safe_cast<int>(num_bytes_to_read));
|
||||
rlock.unlock(std::max(0, bytes_read));
|
||||
|
||||
if (bytes_read < 0)
|
||||
{
|
||||
read_settings.resource_link.accumulate(num_bytes_to_read); // We assume no resource was used in case of failure
|
||||
throw Exception(ErrorCodes::NETWORK_ERROR,
|
||||
"Fail to read from HDFS: {}, file path: {}. Error: {}",
|
||||
hdfs_uri, hdfs_file_path, std::string(hdfsGetLastError()));
|
||||
}
|
||||
read_settings.resource_link.adjust(num_bytes_to_read, bytes_read);
|
||||
|
||||
if (bytes_read)
|
||||
{
|
||||
|
@ -66,25 +66,12 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
|
||||
|
||||
int write(const char * start, size_t size)
|
||||
{
|
||||
ResourceGuard rlock(write_settings.resource_link, size);
|
||||
int bytes_written;
|
||||
try
|
||||
{
|
||||
bytes_written = hdfsWrite(fs.get(), fout, start, safe_cast<int>(size));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
write_settings.resource_link.accumulate(size); // We assume no resource was used in case of failure
|
||||
throw;
|
||||
}
|
||||
rlock.unlock();
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIOWrite(), write_settings.io_scheduling.write_resource_link, size);
|
||||
int bytes_written = hdfsWrite(fs.get(), fout, start, safe_cast<int>(size));
|
||||
rlock.unlock(std::max(0, bytes_written));
|
||||
|
||||
if (bytes_written < 0)
|
||||
{
|
||||
write_settings.resource_link.accumulate(size); // We assume no resource was used in case of failure
|
||||
throw Exception(ErrorCodes::NETWORK_ERROR, "Fail to write HDFS file: {} {}", hdfs_uri, std::string(hdfsGetLastError()));
|
||||
}
|
||||
write_settings.resource_link.adjust(size, bytes_written);
|
||||
|
||||
if (write_settings.remote_throttler)
|
||||
write_settings.remote_throttler->add(bytes_written, ProfileEvents::RemoteWriteThrottlerBytes, ProfileEvents::RemoteWriteThrottlerSleepMicroseconds);
|
||||
|
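Both HDFS buffers above now follow the same ResourceGuard pattern. Shown standalone below with a plain POSIX read as the guarded I/O call; the metric getter, the scheduling link and the unlock(bytes) overload are taken from the diff, everything else is an assumption for illustration.

ssize_t scheduledRead(ResourceLink link, int fd, char * buf, size_t size)
{
    ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), link, size);
    ssize_t bytes = ::read(fd, buf, size);            /// the guarded syscall
    rlock.unlock(std::max<ssize_t>(0, bytes));        /// report what was actually consumed; 0 on failure
    return bytes;
}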
@ -126,6 +126,7 @@ void listFilesWithRegexpMatchingImpl(
|
||||
/// Otherwise it will not allow to work with symlinks in `user_files_path` directory.
|
||||
fs::canonical(path_for_ls + for_match);
|
||||
fs::path absolute_path = fs::absolute(path_for_ls + for_match);
|
||||
absolute_path = absolute_path.lexically_normal(); /// ensure that the resulting path is normalized (e.g., removes any redundant slashes or . and .. segments)
|
||||
result.push_back(absolute_path.string());
|
||||
}
|
||||
catch (const std::exception &) // NOLINT
|
||||
|
@@ -221,14 +221,17 @@ void StorageReplicatedMergeTree::setZooKeeper()
    /// strange effects. So we always use only one session for all tables.
    /// (excluding auxiliary zookeepers)

    std::lock_guard lock(current_zookeeper_mutex);
    if (zookeeper_name == default_zookeeper_name)
    {
        current_zookeeper = getContext()->getZooKeeper();
        auto new_keeper = getContext()->getZooKeeper();
        std::lock_guard lock(current_zookeeper_mutex);
        current_zookeeper = new_keeper;
    }
    else
    {
        current_zookeeper = getContext()->getAuxiliaryZooKeeper(zookeeper_name);
        auto new_keeper = getContext()->getAuxiliaryZooKeeper(zookeeper_name);
        std::lock_guard lock(current_zookeeper_mutex);
        current_zookeeper = new_keeper;
    }
}
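The reordering above follows a general pattern: perform the potentially slow connect outside the mutex and take the lock only to publish the result. A generic, illustrative sketch (the template and names are not from the codebase; assumes <mutex> is available):

template <typename SessionPtr, typename Connect>
void refreshSession(std::mutex & mutex, SessionPtr & current, Connect && connect)
{
    auto fresh = connect();            /// may block on the network, done without the lock
    std::lock_guard lock(mutex);       /// short critical section: just swap the pointer
    current = std::move(fresh);
}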
@ -365,7 +368,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
|
||||
bool has_zookeeper = getContext()->hasZooKeeper() || getContext()->hasAuxiliaryZooKeeper(zookeeper_name);
|
||||
if (has_zookeeper)
|
||||
{
|
||||
/// It's possible for getZooKeeper() to timeout if zookeeper host(s) can't
|
||||
/// It's possible for getZooKeeper() to timeout if zookeeper host(s) can't
|
||||
/// be reached. In such cases Poco::Exception is thrown after a connection
|
||||
/// timeout - refer to src/Common/ZooKeeper/ZooKeeperImpl.cpp:866 for more info.
|
||||
///
|
||||
|
@@ -288,7 +288,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri
    # Normalize bug fixes
    if (
        re.match(
            r".*(?i)bug\Wfix",
            r"(?i).*bug\Wfix",
            category,
        )
        # Map "Critical Bug Fix" to "Bug fix" category for changelog
@ -427,6 +427,7 @@ class CI:
|
||||
pr_only=True,
|
||||
# TODO: approach with reference job names does not work because digest may not be calculated if job skipped in wf
|
||||
# reference_job_name=JobNames.INTEGRATION_TEST_TSAN,
|
||||
timeout=4 * 3600, # to be able to process many updated tests
|
||||
),
|
||||
JobNames.COMPATIBILITY_TEST: CommonJobConfigs.COMPATIBILITY_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE],
|
||||
|
@ -33,7 +33,7 @@ CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse"
|
||||
CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-odbc-bridge"
|
||||
CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-library-bridge"
|
||||
|
||||
FLAKY_TRIES_COUNT = 10 # run whole pytest several times
|
||||
FLAKY_TRIES_COUNT = 3 # run whole pytest several times
|
||||
FLAKY_REPEAT_COUNT = 5 # runs test case in single module several times
|
||||
MAX_TIME_SECONDS = 3600
|
||||
|
||||
@ -782,47 +782,71 @@ class ClickhouseIntegrationTestsRunner:
|
||||
logging.info("Found '%s' tests to run", " ".join(tests_to_run))
|
||||
result_state = "success"
|
||||
description_prefix = "No flaky tests: "
|
||||
start = time.time()
|
||||
logging.info("Starting check with retries")
|
||||
final_retry = 0
|
||||
logs = []
|
||||
tries_num = 1 if should_fail else FLAKY_TRIES_COUNT
|
||||
for i in range(tries_num):
|
||||
if timeout_expired:
|
||||
print("Timeout expired - break flaky check execution")
|
||||
break
|
||||
final_retry += 1
|
||||
logging.info("Running tests for the %s time", i)
|
||||
counters, tests_times, log_paths = self.try_run_test_group(
|
||||
repo_path,
|
||||
"bugfix" if should_fail else "flaky",
|
||||
tests_to_run,
|
||||
1,
|
||||
1,
|
||||
FLAKY_REPEAT_COUNT,
|
||||
)
|
||||
logs += log_paths
|
||||
if counters["FAILED"]:
|
||||
logging.info("Found failed tests: %s", " ".join(counters["FAILED"]))
|
||||
description_prefix = "Failed tests found: "
|
||||
result_state = "failure"
|
||||
if not should_fail:
|
||||
counters = {
|
||||
"ERROR": [],
|
||||
"PASSED": [],
|
||||
"FAILED": [],
|
||||
"SKIPPED": [],
|
||||
"BROKEN": [],
|
||||
"NOT_FAILED": [],
|
||||
} # type: Dict
|
||||
tests_times = defaultdict(float) # type: Dict
|
||||
tests_log_paths = defaultdict(list)
|
||||
id_counter = 0
|
||||
for test_to_run in tests_to_run:
|
||||
tries_num = 1 if should_fail else FLAKY_TRIES_COUNT
|
||||
for i in range(tries_num):
|
||||
if timeout_expired:
|
||||
print("Timeout expired - break flaky check execution")
|
||||
break
|
||||
if counters["ERROR"]:
|
||||
description_prefix = "Failed tests found: "
|
||||
logging.info("Found error tests: %s", " ".join(counters["ERROR"]))
|
||||
# NOTE "error" result state will restart the whole test task,
|
||||
# so we use "failure" here
|
||||
result_state = "failure"
|
||||
if not should_fail:
|
||||
final_retry += 1
|
||||
logging.info("Running tests for the %s time", i)
|
||||
group_counters, group_test_times, log_paths = self.try_run_test_group(
|
||||
repo_path,
|
||||
f"bugfix_{id_counter}" if should_fail else f"flaky{id_counter}",
|
||||
[test_to_run],
|
||||
1,
|
||||
1,
|
||||
FLAKY_REPEAT_COUNT,
|
||||
)
|
||||
id_counter = id_counter + 1
|
||||
for counter, value in group_counters.items():
|
||||
logging.info(
|
||||
"Tests from group %s stats, %s count %s",
|
||||
test_to_run,
|
||||
counter,
|
||||
len(value),
|
||||
)
|
||||
counters[counter] += value
|
||||
|
||||
for test_name, test_time in group_test_times.items():
|
||||
tests_times[test_name] = test_time
|
||||
tests_log_paths[test_name] = log_paths
|
||||
if not should_fail and (
|
||||
group_counters["FAILED"] or group_counters["ERROR"]
|
||||
):
|
||||
logging.info(
|
||||
"Unexpected failure in group %s. Fail fast for current group",
|
||||
test_to_run,
|
||||
)
|
||||
break
|
||||
logging.info("Try is OK, all tests passed, going to clear env")
|
||||
clear_ip_tables_and_restart_daemons()
|
||||
logging.info("And going to sleep for some time")
|
||||
if time.time() - start > MAX_TIME_SECONDS:
|
||||
logging.info("Timeout reached, going to finish flaky check")
|
||||
break
|
||||
time.sleep(5)
|
||||
|
||||
if counters["FAILED"]:
|
||||
logging.info("Found failed tests: %s", " ".join(counters["FAILED"]))
|
||||
description_prefix = "Failed tests found: "
|
||||
result_state = "failure"
|
||||
if counters["ERROR"]:
|
||||
description_prefix = "Failed tests found: "
|
||||
logging.info("Found error tests: %s", " ".join(counters["ERROR"]))
|
||||
# NOTE "error" result state will restart the whole test task,
|
||||
# so we use "failure" here
|
||||
result_state = "failure"
|
||||
logging.info("Try is OK, all tests passed, going to clear env")
|
||||
clear_ip_tables_and_restart_daemons()
|
||||
logging.info("And going to sleep for some time")
|
||||
time.sleep(5)
|
||||
|
||||
test_result = []
|
||||
for state in ("ERROR", "FAILED", "PASSED", "SKIPPED"):
|
||||
@ -833,13 +857,10 @@ class ClickhouseIntegrationTestsRunner:
|
||||
else:
|
||||
text_state = state
|
||||
test_result += [
|
||||
(
|
||||
c + " (✕" + str(final_retry) + ")",
|
||||
text_state,
|
||||
f"{tests_times[c]:.2f}",
|
||||
)
|
||||
(c, text_state, f"{tests_times[c]:.2f}", tests_log_paths[c])
|
||||
for c in counters[state]
|
||||
]
|
||||
|
||||
status_text = description_prefix + ", ".join(
|
||||
[
|
||||
str(n).lower().replace("failed", "fail") + ": " + str(len(c))
|
||||
@ -847,26 +868,50 @@ class ClickhouseIntegrationTestsRunner:
|
||||
]
|
||||
)
|
||||
|
||||
return result_state, status_text, test_result, logs
|
||||
return result_state, status_text, test_result, tests_log_paths
|
||||
|
||||
def run_impl(self, repo_path, build_path):
|
||||
stopwatch = Stopwatch()
|
||||
if self.flaky_check or self.bugfix_validate_check:
|
||||
return self.run_flaky_check(
|
||||
repo_path, build_path, should_fail=self.bugfix_validate_check
|
||||
result_state, status_text, test_result, tests_log_paths = (
|
||||
self.run_flaky_check(
|
||||
repo_path, build_path, should_fail=self.bugfix_validate_check
|
||||
)
|
||||
)
|
||||
else:
|
||||
result_state, status_text, test_result, tests_log_paths = (
|
||||
self.run_normal_check(build_path, repo_path)
|
||||
)
|
||||
|
||||
self._install_clickhouse(build_path)
|
||||
if self.soft_deadline_time < time.time():
|
||||
status_text = "Timeout, " + status_text
|
||||
result_state = "failure"
|
||||
|
||||
if timeout_expired:
|
||||
logging.error(
|
||||
"Job killed by external timeout signal - setting status to failure!"
|
||||
)
|
||||
status_text = "Job timeout expired, " + status_text
|
||||
result_state = "failure"
|
||||
# add mock test case to make timeout visible in job report and in ci db
|
||||
test_result.insert(
|
||||
0, (JOB_TIMEOUT_TEST_NAME, "FAIL", f"{stopwatch.duration_seconds}", "")
|
||||
)
|
||||
|
||||
if "(memory)" in self.params["context_name"]:
|
||||
result_state = "success"
|
||||
|
||||
return result_state, status_text, test_result, tests_log_paths
|
||||
|
||||
def run_normal_check(self, build_path, repo_path):
|
||||
self._install_clickhouse(build_path)
|
||||
logging.info("Pulling images")
|
||||
self._pre_pull_images(repo_path)
|
||||
|
||||
logging.info(
|
||||
"Dump iptables before run %s",
|
||||
subprocess.check_output("sudo iptables -nvL", shell=True),
|
||||
)
|
||||
all_tests = self._get_all_tests(repo_path)
|
||||
|
||||
if self.run_by_hash_total != 0:
|
||||
grouped_tests = self.group_test_by_file(all_tests)
|
||||
all_filtered_by_hash_tests = []
|
||||
@ -874,7 +919,6 @@ class ClickhouseIntegrationTestsRunner:
|
||||
if stringhash(group) % self.run_by_hash_total == self.run_by_hash_num:
|
||||
all_filtered_by_hash_tests += tests_in_group
|
||||
all_tests = all_filtered_by_hash_tests
|
||||
|
||||
parallel_skip_tests = self._get_parallel_tests_skip_list(repo_path)
|
||||
logging.info(
|
||||
"Found %s tests first 3 %s", len(all_tests), " ".join(all_tests[:3])
|
||||
@ -906,14 +950,12 @@ class ClickhouseIntegrationTestsRunner:
|
||||
len(not_found_tests),
|
||||
" ".join(not_found_tests[:3]),
|
||||
)
|
||||
|
||||
grouped_tests = self.group_test_by_file(filtered_sequential_tests)
|
||||
i = 0
|
||||
for par_group in chunks(filtered_parallel_tests, PARALLEL_GROUP_SIZE):
|
||||
grouped_tests[f"parallel{i}"] = par_group
|
||||
i += 1
|
||||
logging.info("Found %s tests groups", len(grouped_tests))
|
||||
|
||||
counters = {
|
||||
"ERROR": [],
|
||||
"PASSED": [],
|
||||
@ -924,14 +966,11 @@ class ClickhouseIntegrationTestsRunner:
|
||||
} # type: Dict
|
||||
tests_times = defaultdict(float)
|
||||
tests_log_paths = defaultdict(list)
|
||||
|
||||
items_to_run = list(grouped_tests.items())
|
||||
|
||||
logging.info("Total test groups %s", len(items_to_run))
|
||||
if self.shuffle_test_groups():
|
||||
logging.info("Shuffling test groups")
|
||||
random.shuffle(items_to_run)
|
||||
|
||||
for group, tests in items_to_run:
|
||||
if timeout_expired:
|
||||
print("Timeout expired - break tests execution")
|
||||
@ -959,7 +998,6 @@ class ClickhouseIntegrationTestsRunner:
|
||||
if len(counters["FAILED"]) + len(counters["ERROR"]) >= 20:
|
||||
logging.info("Collected more than 20 failed/error tests, stopping")
|
||||
break
|
||||
|
||||
if counters["FAILED"] or counters["ERROR"]:
|
||||
logging.info(
|
||||
"Overall status failure, because we have tests in FAILED or ERROR state"
|
||||
@ -968,7 +1006,6 @@ class ClickhouseIntegrationTestsRunner:
|
||||
else:
|
||||
logging.info("Overall success!")
|
||||
result_state = "success"
|
||||
|
||||
test_result = []
|
||||
for state in (
|
||||
"ERROR",
|
||||
@ -988,33 +1025,14 @@ class ClickhouseIntegrationTestsRunner:
|
||||
(c, text_state, f"{tests_times[c]:.2f}", tests_log_paths[c])
|
||||
for c in counters[state]
|
||||
]
|
||||
|
||||
failed_sum = len(counters["FAILED"]) + len(counters["ERROR"])
|
||||
status_text = f"fail: {failed_sum}, passed: {len(counters['PASSED'])}"
|
||||
|
||||
if self.soft_deadline_time < time.time():
|
||||
status_text = "Timeout, " + status_text
|
||||
result_state = "failure"
|
||||
|
||||
if timeout_expired:
|
||||
logging.error(
|
||||
"Job killed by external timeout signal - setting status to failure!"
|
||||
)
|
||||
status_text = "Job timeout expired, " + status_text
|
||||
result_state = "failure"
|
||||
# add mock test case to make timeout visible in job report and in ci db
|
||||
test_result.insert(
|
||||
0, (JOB_TIMEOUT_TEST_NAME, "FAIL", f"{stopwatch.duration_seconds}", "")
|
||||
)
|
||||
|
||||
if not counters or sum(len(counter) for counter in counters.values()) == 0:
|
||||
status_text = "No tests found for some reason! It's a bug"
|
||||
result_state = "failure"
|
||||
|
||||
if "(memory)" in self.params["context_name"]:
|
||||
result_state = "success"
|
||||
|
||||
return result_state, status_text, test_result, []
|
||||
return result_state, status_text, test_result, tests_log_paths
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
@ -1047,7 +1065,9 @@ def run():
|
||||
logging.info("Clearing dmesg before run")
|
||||
subprocess.check_call("sudo -E dmesg --clear", shell=True)
|
||||
|
||||
state, description, test_results, _ = runner.run_impl(repo_path, build_path)
|
||||
state, description, test_results, _test_log_paths = runner.run_impl(
|
||||
repo_path, build_path
|
||||
)
|
||||
logging.info("Tests finished")
|
||||
|
||||
if IS_CI:
|
||||
|
@ -245,9 +245,9 @@ class PostgresManager:
|
||||
):
|
||||
postgres_database = self.database_or_default(postgres_database)
|
||||
self.created_materialized_postgres_db_list.add(materialized_database)
|
||||
self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database}")
|
||||
self.instance.query(f"DROP DATABASE IF EXISTS `{materialized_database}`")
|
||||
|
||||
create_query = f"CREATE DATABASE {materialized_database} ENGINE = MaterializedPostgreSQL('{ip}:{port}', '{postgres_database}', '{user}', '{password}')"
|
||||
create_query = f"CREATE DATABASE `{materialized_database}` ENGINE = MaterializedPostgreSQL('{ip}:{port}', '{postgres_database}', '{user}', '{password}')"
|
||||
if len(settings) > 0:
|
||||
create_query += " SETTINGS "
|
||||
for i in range(len(settings)):
|
||||
@ -259,7 +259,7 @@ class PostgresManager:
|
||||
assert materialized_database in self.instance.query("SHOW DATABASES")
|
||||
|
||||
def drop_materialized_db(self, materialized_database="test_database"):
|
||||
self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database} SYNC")
|
||||
self.instance.query(f"DROP DATABASE IF EXISTS `{materialized_database}` SYNC")
|
||||
if materialized_database in self.created_materialized_postgres_db_list:
|
||||
self.created_materialized_postgres_db_list.remove(materialized_database)
|
||||
|
||||
@ -329,11 +329,15 @@ def assert_nested_table_is_created(
|
||||
table = schema_name + "." + table_name
|
||||
|
||||
print(f"Checking table {table} exists in {materialized_database}")
|
||||
database_tables = instance.query(f"SHOW TABLES FROM {materialized_database}")
|
||||
database_tables = instance.query(
|
||||
f"SHOW TABLES FROM `{materialized_database}` WHERE name = '{table}'"
|
||||
)
|
||||
|
||||
while table not in database_tables:
|
||||
time.sleep(0.2)
|
||||
database_tables = instance.query(f"SHOW TABLES FROM {materialized_database}")
|
||||
database_tables = instance.query(
|
||||
f"SHOW TABLES FROM `{materialized_database}` WHERE name = '{table}'"
|
||||
)
|
||||
|
||||
assert table in database_tables
|
||||
|
||||
@ -366,9 +370,9 @@ def check_tables_are_synchronized(
|
||||
|
||||
table_path = ""
|
||||
if len(schema_name) == 0:
|
||||
table_path = f"{materialized_database}.{table_name}"
|
||||
table_path = f"`{materialized_database}`.`{table_name}`"
|
||||
else:
|
||||
table_path = f"{materialized_database}.`{schema_name}.{table_name}`"
|
||||
table_path = f"`{materialized_database}`.`{schema_name}.{table_name}`"
|
||||
|
||||
print(f"Checking table is synchronized: {table_path}")
|
||||
result_query = f"select * from {table_path} order by {order_by};"
|
||||
|
@@ -73,3 +73,8 @@ def test_default_database_on_cluster(started_cluster):
        database="test_default_database",
        sql="SHOW CREATE test_local_table FORMAT TSV",
    ).endswith("old_parts_lifetime = 100\n")

    ch1.query(
        database="test_default_database",
        sql="DROP TABLE test_local_table ON CLUSTER 'cluster' SYNC",
    )

@@ -80,3 +80,6 @@ def test_replica_always_download(started_cluster):

    assert int(node1_parts) < 10
    assert int(node2_parts) < 10

    node1.query_with_retry("DROP TABLE test_table SYNC")
    node2.query_with_retry("DROP TABLE test_table SYNC")
Some files were not shown because too many files have changed in this diff.