Merge branch 'master' into fix-typo-in-actionsdag
commit 1b29a89107 (mirror of https://github.com/ClickHouse/ClickHouse.git)
@@ -19,6 +19,8 @@
#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@@ -33,6 +35,27 @@ namespace Net
{


class IHTTPSessionDataHooks
/// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession.
/// It allows monitoring, throttling and scheduling data streams with syscall granularity.
{
public:
virtual ~IHTTPSessionDataHooks() = default;

virtual void atStart(int bytes) = 0;
/// Called before sending/receiving data `bytes` to/from the socket.

virtual void atFinish(int bytes) = 0;
/// Called when sending/receiving of data `bytes` is successfully finished.

virtual void atFail() = 0;
/// If an error occurred during send/receive, `atFail()` is called instead of `atFinish()`.
};


using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;
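To make the hook contract concrete, here is a minimal sketch of a custom implementation, assuming the interface lands in `Poco::Net` as declared in this header. `ByteCountingHooks` and `attachCountingHooks` are illustrative names, not part of the patch.

```cpp
#include <atomic>
#include <memory>
#include "Poco/Net/HTTPClientSession.h"
#include "Poco/Net/HTTPSession.h"

// Hypothetical hook that only counts traffic. It illustrates the contract:
// atStart() runs before every socket send/receive, atFinish() after a
// successful one, and atFail() replaces atFinish() when the operation throws.
class ByteCountingHooks : public Poco::Net::IHTTPSessionDataHooks
{
public:
    void atStart(int bytes) override { requested += bytes; }
    void atFinish(int bytes) override { transferred += bytes; }
    void atFail() override { ++failures; }

    std::atomic<int> requested{0};
    std::atomic<int> transferred{0};
    std::atomic<int> failures{0};
};

void attachCountingHooks(Poco::Net::HTTPClientSession & session)
{
    auto hooks = std::make_shared<ByteCountingHooks>();
    session.setSendDataHooks(hooks);     // wraps every sendBytes() call
    session.setReceiveDataHooks(hooks);  // wraps every receiveBytes() call
}
```

`HTTPSession::write()` and `HTTPSession::receive()` in the diff below invoke exactly these three callbacks around `sendBytes()`/`receiveBytes()`.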
class Net_API HTTPSession
/// HTTPSession implements basic HTTP session management
/// for both HTTP clients and HTTP servers.
@@ -73,6 +96,12 @@ namespace Net
Poco::Timespan getReceiveTimeout() const;
/// Returns receive timeout for the HTTP session.

void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
/// Sets data hooks that will be called on every send to the socket.

void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
/// Sets data hooks that will be called on every receive from the socket.

bool connected() const;
/// Returns true if the underlying socket is connected.

@@ -211,6 +240,10 @@ namespace Net
Poco::Exception * _pException;
Poco::Any _data;

// Data hooks
HTTPSessionDataHooksPtr _sendDataHooks;
HTTPSessionDataHooksPtr _receiveDataHooks;

friend class HTTPStreamBuf;
friend class HTTPHeaderStreamBuf;
friend class HTTPFixedLengthStreamBuf;
@@ -246,6 +279,16 @@ namespace Net
return _receiveTimeout;
}

inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
{
_sendDataHooks = sendDataHooks;
}

inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
{
_receiveDataHooks = receiveDataHooks;
}

inline StreamSocket & HTTPSession::socket()
{
return _socket;
@@ -128,14 +128,14 @@ int HTTPSession::get()
{
if (_pCurrent == _pEnd)
refill();

if (_pCurrent < _pEnd)
return *_pCurrent++;
else
return std::char_traits<char>::eof();
}


int HTTPSession::peek()
{
if (_pCurrent == _pEnd)
@@ -147,7 +147,7 @@ int HTTPSession::peek()
return std::char_traits<char>::eof();
}


int HTTPSession::read(char* buffer, std::streamsize length)
{
if (_pCurrent < _pEnd)
@@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
try
{
return _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atStart((int) length);
int result = _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_sendDataHooks)
_sendDataHooks->atFail();
setException(exc);
throw;
}
@@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
try
{
return _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atStart(length);
int result = _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_receiveDataHooks)
_receiveDataHooks->atFail();
setException(exc);
throw;
}
@@ -63,7 +63,7 @@ bool checkIsBrokenTimeout()

SocketImpl::SocketImpl():
_sockfd(POCO_INVALID_SOCKET),
_blocking(true),
_isBrokenTimeout(checkIsBrokenTimeout())
{
}
@@ -82,7 +82,7 @@ SocketImpl::~SocketImpl()
close();
}


SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -118,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
rc = ::connect(_sockfd, address.addr(), address.length());
}
while (rc != 0 && lastError() == POCO_EINTR);
if (rc != 0)
{
int err = lastError();
error(err, address.toString());
@@ -205,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#if defined(POCO_HAVE_IPv6)
if (address.family() != SocketAddress::IPv6)
throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");

if (_sockfd == POCO_INVALID_SOCKET)
{
init(address.af());
@@ -226,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#endif
}


void SocketImpl::listen(int backlog)
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();

int rc = ::listen(_sockfd, backlog);
if (rc != 0) error();
}
@@ -254,7 +254,7 @@ void SocketImpl::shutdownReceive()
if (rc != 0) error();
}


void SocketImpl::shutdownSend()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -263,7 +263,7 @@ void SocketImpl::shutdownSend()
if (rc != 0) error();
}


void SocketImpl::shutdown()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -318,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
throw TimeoutException();
}
}

int rc;
do
{
@@ -326,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
}
while (blocking && rc < 0 && lastError() == POCO_EINTR);
if (rc < 0)
{
int err = lastError();
if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
@@ -364,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
throw TimeoutException();
}
}

sockaddr_storage abuffer;
struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
poco_socklen_t saLen = sizeof(abuffer);
@@ -451,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
}
while (rc < 0 && lastError() == POCO_EINTR);
if (rc < 0) error();
return rc > 0;

#else

@@ -494,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
}
while (rc < 0 && errorCode == POCO_EINTR);
if (rc < 0) error(errorCode);
return rc > 0;

#endif // POCO_HAVE_FD_POLL
}
@@ -504,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
Poco::Timespan remainingTime(timeout);
return pollImpl(remainingTime, mode);
}


void SocketImpl::setSendBufferSize(int size)
{
setOption(SOL_SOCKET, SO_SNDBUF, size);
}


int SocketImpl::getSendBufferSize()
{
int result;
@@ -524,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
setOption(SOL_SOCKET, SO_RCVBUF, size);
}


int SocketImpl::getReceiveBufferSize()
{
int result;
@@ -570,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
return result;
}


SocketAddress SocketImpl::address()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -581,7 +581,7 @@ SocketAddress SocketImpl::address()
int rc = ::getsockname(_sockfd, pSA, &saLen);
if (rc == 0)
return SocketAddress(pSA, saLen);
else
error();
return SocketAddress();
}
@@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry
32
docs/changelogs/v24.3.10.33-lts.md
Normal file
@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)

#### Improvement
* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
29
docs/changelogs/v24.5.7.31-stable.md
Normal file
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).

#### NO CL CATEGORY

* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
29
docs/changelogs/v24.6.5.30-stable.md
Normal file
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
@@ -58,7 +58,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds": "0"}

1
```
@@ -472,7 +472,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@@ -668,7 +668,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@@ -708,7 +708,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@@ -766,7 +766,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@@ -785,7 +785,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact
@@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

**Parameters**

- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

**Returned values**

@@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

Type: `UInt8`.

<a name="sequence-function-pattern-syntax"></a>
**Pattern syntax**
#### Pattern syntax

- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.

@@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)

**Parameters**

- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

**Returned values**
@@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we

### During merges of data parts in MergeTree table engines

During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains
During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what paths will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contain
the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.

Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:
@@ -2019,7 +2019,7 @@ Alias: `dateTrunc`.

`unit` argument is case-insensitive.

- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).

**Returned value**
@@ -49,6 +49,55 @@ SETTINGS cast_keep_nullable = 1
└──────────────────┴─────────────────────┴──────────────────┘
```

## toBool

Converts an input value to a value of type [`Bool`](../data-types/boolean.md). Throws an exception in case of an error.

**Syntax**

```sql
toBool(expr)
```

**Arguments**

- `expr` — Expression returning a number or a string. [Expression](../syntax.md/#syntax-expressions).

Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- Values of type Float32/64.
- Strings `true` or `false` (case-insensitive).

**Returned value**

- Returns `true` or `false` based on evaluation of the argument. [Bool](../data-types/boolean.md).

**Example**

Query:

```sql
SELECT
toBool(toUInt8(1)),
toBool(toInt8(-1)),
toBool(toFloat32(1.01)),
toBool('true'),
toBool('false'),
toBool('FALSE')
FORMAT Vertical
```

Result:

```response
toBool(toUInt8(1)): true
toBool(toInt8(-1)): true
toBool(toFloat32(1.01)): true
toBool('true'): true
toBool('false'): false
toBool('FALSE'): false
```

## toInt8

Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.
@@ -10,7 +10,7 @@ title: The Lightweight DELETE Statement

The lightweight `DELETE` statement removes rows from the table `[db.]table` that match the expression `expr`. It is only available for the *MergeTree table engine family.

``` sql
DELETE FROM [db.]table [ON CLUSTER cluster] WHERE expr;
DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
```

It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
@@ -50,7 +50,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}

1
```
@@ -367,7 +367,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0", "elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@@ -601,7 +601,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@@ -659,7 +659,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@@ -678,7 +678,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact
@@ -53,7 +53,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}

1
```
@@ -363,7 +363,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@@ -524,7 +524,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@@ -564,7 +564,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@@ -616,7 +616,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@@ -635,7 +635,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact
@@ -677,4 +677,122 @@ void GetAllChildrenNumberCommand::execute(const ASTKeeperQuery * query, KeeperCl
std::cout << totalNumChildren << "\n";
}

namespace
{

class CPMVOperation
{
constexpr static UInt64 kTryLimit = 1000;

public:
CPMVOperation(String src_, String dest_, bool remove_src_, KeeperClient * client_)
: src(std::move(src_)), dest(std::move(dest_)), remove_src(remove_src_), client(client_)
{
}

bool isTryLimitReached() const { return failed_tries_count >= kTryLimit; }

bool isCompleted() const { return is_completed; }

void perform()
{
Coordination::Stat src_stat;
String data = client->zookeeper->get(src, &src_stat);

Coordination::Requests ops{
zkutil::makeCheckRequest(src, src_stat.version),
zkutil::makeCreateRequest(dest, data, zkutil::CreateMode::Persistent), // Do we need to copy ACLs here?
};

if (remove_src)
ops.push_back(zkutil::makeRemoveRequest(src, src_stat.version));

Coordination::Responses responses;
auto code = client->zookeeper->tryMulti(ops, responses);

switch (code)
{
case Coordination::Error::ZOK: {
is_completed = true;
return;
}
case Coordination::Error::ZBADVERSION: {
++failed_tries_count;

if (isTryLimitReached())
zkutil::KeeperMultiException::check(code, ops, responses);

return;
}
default:
zkutil::KeeperMultiException::check(code, ops, responses);
}

throw Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable");
}

private:
String src;
String dest;
bool remove_src = false;
KeeperClient * client = nullptr;

bool is_completed = false;
uint64_t failed_tries_count = 0;
};

}

bool CPCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, [[maybe_unused]] Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));

String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));

return true;
}

void CPCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());

CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/false, /*client_=*/client);

while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}

bool MVCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));

String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));

return true;
}

void MVCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());

CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/true, /*client_=*/client);

while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}

}
@@ -266,4 +266,32 @@ class GetAllChildrenNumberCommand : public IKeeperClientCommand
}
};

class CPCommand : public IKeeperClientCommand
{
String getName() const override { return "cp"; }

bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

String getHelpMessage() const override
{
return "{} <src> <dest> -- Copies 'src' node to 'dest' path.";
}
};

class MVCommand : public IKeeperClientCommand
{
String getName() const override { return "mv"; }

bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

String getHelpMessage() const override
{
return "{} <src> <dest> -- Moves 'src' node to the 'dest' path.";
}
};

}
@@ -212,6 +212,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
std::make_shared<FourLetterWordCommand>(),
std::make_shared<GetDirectChildrenNumberCommand>(),
std::make_shared<GetAllChildrenNumberCommand>(),
std::make_shared<CPCommand>(),
std::make_shared<MVCommand>(),
});

String home_path;
@@ -68,7 +68,10 @@ public:
if (data().isEqualTo(to.data()))
counter += to.counter;
else if (!data().has() || counter < to.counter)
{
data().set(to.data(), arena);
counter = to.counter - counter;
}
else
counter -= to.counter;
}
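The hunk above adds `counter = to.counter - counter;` to the heavy-hitters style merge of `anyHeavy` states: when the other state's candidate wins, only the surplus of its votes should survive, not the old counter. A standalone sketch of that rule with illustrative types (not the actual aggregate-function classes):

```cpp
#include <cassert>
#include <string>

// Toy model of the merge shown above: each state holds a candidate value and
// a vote counter, and merging follows a majority-vote style rule.
struct HeavyState
{
    std::string value;          // empty means "no data yet"
    unsigned long counter = 0;

    void merge(const HeavyState & to)
    {
        if (value == to.value)
            counter += to.counter;              // same candidate: votes add up
        else if (value.empty() || counter < to.counter)
        {
            value = to.value;                   // the other candidate wins
            counter = to.counter - counter;     // keep only its surplus votes
        }
        else
            counter -= to.counter;              // our candidate survives, weakened
    }
};

int main()
{
    HeavyState a{"x", 5};
    HeavyState b{"y", 8};
    a.merge(b);
    assert(a.value == "y" && a.counter == 3);   // 8 - 5 surplus votes remain
    return 0;
}
```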
@@ -34,6 +34,7 @@
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTAuthenticationData.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTUseQuery.h>
@@ -2111,6 +2112,15 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
// - Other formats (e.g. FORMAT CSV) are arbitrarily more complex and tricky to parse. For example, we may be unable to distinguish if the semicolon
// is part of the data or ends the statement. In this case, we simply assume that the end of the INSERT statement is determined by \n\n (two newlines).
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
// We also consider the INSERT query in EXPLAIN queries (same as normal INSERT queries)
if (!insert_ast)
{
auto * explain_ast = parsed_query->as<ASTExplainQuery>();
if (explain_ast && explain_ast->getExplainedQuery())
{
insert_ast = explain_ast->getExplainedQuery()->as<ASTInsertQuery>();
}
}
const char * query_to_execute_end = this_query_end;
if (insert_ast && insert_ast->data)
{
@@ -75,9 +75,9 @@
M(GlobalThread, "Number of threads in global thread pool.") \
M(GlobalThreadActive, "Number of threads in global thread pool running a task.") \
M(GlobalThreadScheduled, "Number of queued or active jobs in global thread pool.") \
M(LocalThread, "Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Number of queued or active jobs in local thread pools.") \
M(LocalThread, "Obsolete. Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Obsolete. Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Obsolete. Number of queued or active jobs in local thread pools.") \
M(MergeTreeDataSelectExecutorThreads, "Number of threads in the MergeTreeDataSelectExecutor thread pool.") \
M(MergeTreeDataSelectExecutorThreadsActive, "Number of threads in the MergeTreeDataSelectExecutor thread pool running a task.") \
M(MergeTreeDataSelectExecutorThreadsScheduled, "Number of queued or active jobs in the MergeTreeDataSelectExecutor thread pool.") \
@@ -292,6 +292,9 @@
M(DistrCacheWriteRequests, "Number of executed Write requests to Distributed Cache") \
M(DistrCacheServerConnections, "Number of open connections to ClickHouse server from Distributed Cache") \
\
M(SchedulerIOReadScheduled, "Number of IO reads are being scheduled currently") \
M(SchedulerIOWriteScheduled, "Number of IO writes are being scheduled currently") \
\
M(StorageConnectionsStored, "Total count of sessions stored in the session pool for storages") \
M(StorageConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for storages") \
\
@@ -113,6 +113,56 @@ std::string_view CurrentThread::getQueryId()
return current_thread->getQueryId();
}

void CurrentThread::attachReadResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link = link;
}

void CurrentThread::detachReadResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link.reset();
}

ResourceLink CurrentThread::getReadResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->read_resource_link;
}

void CurrentThread::attachWriteResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link = link;
}

void CurrentThread::detachWriteResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link.reset();
}

ResourceLink CurrentThread::getWriteResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->write_resource_link;
}

MemoryTracker * CurrentThread::getUserMemoryTracker()
{
if (unlikely(!current_thread))
@@ -2,6 +2,7 @@

#include <Interpreters/Context_fwd.h>
#include <Common/ThreadStatus.h>
#include <Common/Scheduler/ResourceLink.h>

#include <memory>
#include <string>
@@ -23,7 +24,6 @@ class QueryStatus;
struct Progress;
class InternalTextLogsQueue;


/** Collection of static methods to work with thread-local objects.
* Allows to attach and detach query/process (thread group) to a thread
* (to calculate query-related metrics and to allow to obtain query-related data from a thread).
@@ -92,6 +92,14 @@ public:

static std::string_view getQueryId();

// For IO Scheduling
static void attachReadResource(ResourceLink link);
static void detachReadResource();
static ResourceLink getReadResourceLink();
static void attachWriteResource(ResourceLink link);
static void detachWriteResource();
static ResourceLink getWriteResourceLink();

/// Initializes query with current thread as master thread in constructor, and detaches it in destructor
struct QueryScope : private boost::noncopyable
{
@@ -102,6 +110,39 @@ public:
void logPeakMemoryUsage();
bool log_peak_memory_usage_in_destructor = true;
};

/// Scoped attach/detach of IO resource links
struct IOScope : private boost::noncopyable
{
explicit IOScope(ResourceLink read_resource_link, ResourceLink write_resource_link)
{
if (read_resource_link)
{
attachReadResource(read_resource_link);
read_attached = true;
}
if (write_resource_link)
{
attachWriteResource(write_resource_link);
write_attached = true;
}
}

explicit IOScope(const IOSchedulingSettings & settings)
: IOScope(settings.read_resource_link, settings.write_resource_link)
{}

~IOScope()
{
if (read_attached)
detachReadResource();
if (write_attached)
detachWriteResource();
}

bool read_attached = false;
bool write_attached = false;
};
};

}
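A minimal usage sketch of the new `CurrentThread::IOScope` helper, assuming the caller already has an `IOSchedulingSettings` object carrying the desired read/write `ResourceLink`s (the surrounding function name is illustrative):

```cpp
#include <Common/CurrentThread.h>

using namespace DB;

// Illustrative worker: while the IOScope is alive, the current thread's
// read/write resource links are set, so HTTP sessions created on this thread
// (see HTTPConnectionPool.cpp below) install the scheduler data hooks.
void copyWithIOScheduling(const IOSchedulingSettings & io_settings)
{
    CurrentThread::IOScope io_scope(io_settings);

    // ... perform reads/writes here; ResourceGuardSessionDataHooks is attached
    // to outgoing sessions via CurrentThread::get{Read,Write}ResourceLink().

}   // the destructor detaches the links, even if an exception is thrown
```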
@ -2,6 +2,7 @@
|
||||
#include <Common/HostResolvePool.h>
|
||||
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Exception.h>
|
||||
@ -9,6 +10,7 @@
|
||||
#include <Common/ProxyConfiguration.h>
|
||||
#include <Common/MemoryTrackerSwitcher.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <Common/proxyConfigurationToPocoProxyConfig.h>
|
||||
|
||||
#include <Poco/Net/HTTPChunkedStream.h>
|
||||
@ -236,6 +238,59 @@ public:
|
||||
};
|
||||
|
||||
|
||||
// Session data hooks implementation for integration with resource scheduler.
|
||||
// Hooks are created per every request-response pair and are registered/unregistered in HTTP session.
|
||||
// * `atStart()` send resource request to the scheduler every time HTTP session is going to send or receive
|
||||
// data to/from socket. `start()` waits for the scheduler confirmation. This way scheduler might
|
||||
// throttle and/or schedule socket data streams.
|
||||
// * `atFinish()` hook is called on successful socket read/write operation.
|
||||
// It informs the scheduler that operation is complete, which allows the scheduler to control the total
|
||||
// amount of in-flight bytes and/or operations.
|
||||
// * `atFail()` hook is called on failure of socket operation. The purpose is to correct the amount of bytes
|
||||
// passed through the scheduler queue to ensure fair bandwidth allocation even in presence of errors.
|
||||
struct ResourceGuardSessionDataHooks : public Poco::Net::IHTTPSessionDataHooks
|
||||
{
|
||||
ResourceGuardSessionDataHooks(ResourceLink link_, const ResourceGuard::Metrics * metrics, LoggerPtr log_, const String & method, const String & uri)
|
||||
: link(link_)
|
||||
, log(log_)
|
||||
, http_request(method + " " + uri)
|
||||
{
|
||||
request.metrics = metrics;
|
||||
chassert(link);
|
||||
}
|
||||
|
||||
~ResourceGuardSessionDataHooks() override
|
||||
{
|
||||
request.assertFinished(); // Never destruct with an active request
|
||||
}
|
||||
|
||||
void atStart(int bytes) override
|
||||
{
|
||||
Stopwatch timer;
|
||||
request.enqueue(bytes, link);
|
||||
request.wait();
|
||||
timer.stop();
|
||||
if (timer.elapsedMilliseconds() >= 5000)
|
||||
LOG_INFO(log, "Resource request took too long to finish: {} ms for {}", timer.elapsedMilliseconds(), http_request);
|
||||
}
|
||||
|
||||
void atFinish(int bytes) override
|
||||
{
|
||||
request.finish(bytes, link);
|
||||
}
|
||||
|
||||
void atFail() override
|
||||
{
|
||||
request.finish(0, link);
|
||||
}
|
||||
|
||||
ResourceLink link;
|
||||
ResourceGuard::Request request;
|
||||
LoggerPtr log;
|
||||
String http_request;
|
||||
};
|
||||
|
||||
|
||||
// EndpointConnectionPool manages connections to the endpoint
// Features:
// - it uses HostResolver for address selection. See Common/HostResolver.h for more info.
@ -246,8 +301,6 @@ public:
// - `Session::reconnect()` uses the pool as well
// - comprehensive sensors
// - a session is reused according to its inner state, automatically


template <class Session>
class EndpointConnectionPool : public std::enable_shared_from_this<EndpointConnectionPool<Session>>, public IExtendedPool
{
@ -337,6 +390,13 @@ private:
    std::ostream & sendRequest(Poco::Net::HTTPRequest & request) override
    {
        auto idle = idleTime();

        // Set data hooks for IO scheduling
        if (ResourceLink link = CurrentThread::getReadResourceLink())
            Session::setReceiveDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIORead(), log, request.getMethod(), request.getURI()));
        if (ResourceLink link = CurrentThread::getWriteResourceLink())
            Session::setSendDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIOWrite(), log, request.getMethod(), request.getURI()));

        std::ostream & result = Session::sendRequest(request);
        result.exceptions(std::ios::badbit);

@ -393,6 +453,8 @@ private:
            }
        }
        response_stream = nullptr;
        Session::setSendDataHooks();
        Session::setReceiveDataHooks();

        group->atConnectionDestroy();

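For illustration, a minimal sketch of how the same hooks could be attached to a plain Poco session outside of the connection pool. The helper name is hypothetical; the setters, hook class, and thread-local resource links are the ones introduced above, and calling the setters with no argument (as in the pool's connection cleanup) clears the hooks again.

// Hypothetical helper: attach IO-scheduling hooks to an HTTP session for a single request.
// Assumes the thread-local resource links are set (e.g. by CurrentThread::IOScope).
void attachIOSchedulingHooks(Poco::Net::HTTPSession & session, LoggerPtr log, const Poco::Net::HTTPRequest & request)
{
    if (ResourceLink link = CurrentThread::getReadResourceLink())
        session.setReceiveDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(
            link, ResourceGuard::Metrics::getIORead(), log, request.getMethod(), request.getURI()));
    if (ResourceLink link = CurrentThread::getWriteResourceLink())
        session.setSendDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(
            link, ResourceGuard::Metrics::getIOWrite(), log, request.getMethod(), request.getURI()));
}
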
@ -86,6 +86,20 @@
|
||||
M(NetworkReceiveBytes, "Total number of bytes received from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
|
||||
M(NetworkSendBytes, "Total number of bytes send to network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
|
||||
\
|
||||
M(GlobalThreadPoolExpansions, "Counts the total number of times new threads have been added to the global thread pool. This metric indicates the frequency of expansions in the global thread pool to accommodate increased processing demands.") \
|
||||
M(GlobalThreadPoolShrinks, "Counts the total number of times the global thread pool has shrunk by removing threads. This occurs when the number of idle threads exceeds max_thread_pool_free_size, indicating adjustments in the global thread pool size in response to decreased thread utilization.") \
|
||||
M(GlobalThreadPoolThreadCreationMicroseconds, "Total time spent waiting for new threads to start.") \
|
||||
M(GlobalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the global thread pool.") \
|
||||
M(GlobalThreadPoolJobs, "Counts the number of jobs that have been pushed to the global thread pool.") \
|
||||
M(GlobalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
|
||||
M(LocalThreadPoolExpansions, "Counts the total number of times threads have been borrowed from the global thread pool to expand local thread pools.") \
|
||||
M(LocalThreadPoolShrinks, "Counts the total number of times threads have been returned to the global thread pool from local thread pools.") \
|
||||
M(LocalThreadPoolThreadCreationMicroseconds, "Total time local thread pools have spent waiting to borrow a thread from the global pool.") \
|
||||
M(LocalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the local thread pools.") \
|
||||
M(LocalThreadPoolJobs, "Counts the number of jobs that have been pushed to the local thread pools.") \
|
||||
M(LocalThreadPoolBusyMicroseconds, "Total time threads have spent executing the actual work.") \
|
||||
M(LocalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
|
||||
\
|
||||
M(DiskS3GetRequestThrottlerCount, "Number of DiskS3 GET and SELECT requests passed through throttler.") \
|
||||
M(DiskS3GetRequestThrottlerSleepMicroseconds, "Total time a query was sleeping to conform DiskS3 GET and SELECT request throttling.") \
|
||||
M(DiskS3PutRequestThrottlerCount, "Number of DiskS3 PUT, COPY, POST and LIST requests passed through throttler.") \
|
||||
@ -106,6 +120,13 @@
|
||||
M(PartsWithAppliedMutationsOnFly, "Total number of parts for which there was any mutation applied on fly") \
|
||||
M(MutationsAppliedOnFlyInAllParts, "The sum of number of applied mutations on-fly for part among all read parts") \
|
||||
\
|
||||
M(SchedulerIOReadRequests, "Resource requests passed through scheduler for IO reads.") \
|
||||
M(SchedulerIOReadBytes, "Bytes passed through scheduler for IO reads.") \
|
||||
M(SchedulerIOReadWaitMicroseconds, "Total time a query was waiting on resource requests for IO reads.") \
|
||||
M(SchedulerIOWriteRequests, "Resource requests passed through scheduler for IO writes.") \
|
||||
M(SchedulerIOWriteBytes, "Bytes passed through scheduler for IO writes.") \
|
||||
M(SchedulerIOWriteWaitMicroseconds, "Total time a query was waiting on resource requests for IO writes.") \
|
||||
\
|
||||
M(QueryMaskingRulesMatch, "Number of times query masking rules was successfully matched.") \
|
||||
\
|
||||
M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \
|
||||
|
@ -34,13 +34,16 @@ bool ProgressIndication::updateProgress(const Progress & value)
|
||||
|
||||
void ProgressIndication::resetProgress()
|
||||
{
|
||||
watch.restart();
|
||||
progress.reset();
|
||||
show_progress_bar = false;
|
||||
written_progress_chars = 0;
|
||||
write_progress_on_update = false;
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
progress.reset();
|
||||
show_progress_bar = false;
|
||||
written_progress_chars = 0;
|
||||
write_progress_on_update = false;
|
||||
}
|
||||
{
|
||||
std::lock_guard lock(profile_events_mutex);
|
||||
watch.restart();
|
||||
cpu_usage_meter.reset(getElapsedNanoseconds());
|
||||
hosts_data.clear();
|
||||
}
|
||||
@ -90,6 +93,8 @@ ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const
|
||||
|
||||
void ProgressIndication::writeFinalProgress()
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
|
||||
if (progress.read_rows < 1000)
|
||||
return;
|
||||
|
||||
@ -271,6 +276,8 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
|
||||
|
||||
void ProgressIndication::clearProgressOutput(WriteBufferFromFileDescriptor & message)
|
||||
{
|
||||
std::lock_guard lock(progress_mutex);
|
||||
|
||||
if (written_progress_chars)
|
||||
{
|
||||
written_progress_chars = 0;
|
||||
|
@ -115,6 +115,8 @@ private:
    /// Concurrent access to the following is possible:
    /// - writeProgress() (class properties) (guarded with progress_mutex)
    /// - hosts_data/cpu_usage_meter (guarded with profile_events_mutex)
    ///
    /// More races are also possible if the query is cancelled, since clearProgressOutput() can then be called concurrently
    mutable std::mutex profile_events_mutex;
    mutable std::mutex progress_mutex;


@ -22,10 +22,13 @@ public:
    {}

    // Wrapper for `enqueueRequest()` that should be used to account for available resource budget
    void enqueueRequestUsingBudget(ResourceRequest * request)
    // Returns `estimated_cost` that should be passed later to `adjustBudget()`
    [[ nodiscard ]] ResourceCost enqueueRequestUsingBudget(ResourceRequest * request)
    {
        request->cost = budget.ask(request->cost);
        ResourceCost estimated_cost = request->cost;
        request->cost = budget.ask(estimated_cost);
        enqueueRequest(request);
        return estimated_cost;
    }

    // Should be called to account for difference between real and estimated costs
@ -34,18 +37,6 @@ public:
        budget.adjust(estimated_cost, real_cost);
    }

    // Adjust budget to account for extra consumption of `cost` resource units
    void consumeBudget(ResourceCost cost)
    {
        adjustBudget(0, cost);
    }

    // Adjust budget to account for requested, but not consumed `cost` resource units
    void accumulateBudget(ResourceCost cost)
    {
        adjustBudget(cost, 0);
    }

    /// Enqueue new request to be executed using underlying resource.
    /// Should be called outside of scheduling subsystem, implementation must be thread-safe.
    virtual void enqueueRequest(ResourceRequest * request) = 0;

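As a worked example of the budget protocol above, here is a short hedged sketch of a hypothetical caller: the cost is estimated up front (the queue may adjust it against its accumulated budget), and the difference is settled with `adjustBudget()` once the real consumption is known. This mirrors what `ResourceGuard::Request::finish()` does later in this diff; the function and its parameters are illustrative only.

// Sketch of the estimate-then-adjust lifecycle (caller code is hypothetical).
void runWithBudget(ISchedulerQueue & queue, ResourceRequest & request, ResourceCost guess, ResourceCost real_cost)
{
    request.cost = guess;                                               // lower-bound estimate, e.g. 1 or the buffer size
    ResourceCost estimated = queue.enqueueRequestUsingBudget(&request); // adjusts request.cost by the queue's budget and enqueues it
    // ... the scheduler dequeues the request, the caller performs the real IO and observes `real_cost` bytes ...
    if (estimated != real_cost)
        queue.adjustBudget(estimated, real_cost);                       // settle the difference so shared accounting stays fair
}
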
@ -232,12 +232,13 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
ResourceTestManager & t;
|
||||
|
||||
Guard(ResourceTestManager & t_, ResourceLink link_, ResourceCost cost)
|
||||
: ResourceGuard(link_, cost, PostponeLocking)
|
||||
: ResourceGuard(ResourceGuard::Metrics::getIOWrite(), link_, cost, Lock::Defer)
|
||||
, t(t_)
|
||||
{
|
||||
t.onEnqueue(link);
|
||||
lock();
|
||||
t.onExecute(link);
|
||||
consume(cost);
|
||||
}
|
||||
};
|
||||
|
||||
@ -310,8 +311,9 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
// NOTE: actually leader's request(s) make their own small busy period.
|
||||
void blockResource(ResourceLink link)
|
||||
{
|
||||
ResourceGuard g(link, 1, ResourceGuard::PostponeLocking);
|
||||
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, 1, ResourceGuard::Lock::Defer);
|
||||
g.lock();
|
||||
g.consume(1);
|
||||
// NOTE: at this point we assume resource to be blocked by single request (<max_requests>1</max_requests>)
|
||||
busy_period.arrive_and_wait(); // (1) notify all followers that resource is blocked
|
||||
busy_period.arrive_and_wait(); // (2) wait all followers to enqueue their requests
|
||||
@ -320,10 +322,11 @@ struct ResourceTestManager : public ResourceTestBase
|
||||
{
|
||||
getLinkData(link).left += total_requests + 1;
|
||||
busy_period.arrive_and_wait(); // (1) wait leader to block resource
|
||||
ResourceGuard g(link, cost, ResourceGuard::PostponeLocking);
|
||||
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, cost, ResourceGuard::Lock::Defer);
|
||||
onEnqueue(link);
|
||||
busy_period.arrive_and_wait(); // (2) notify leader to unblock
|
||||
g.lock();
|
||||
g.consume(cost);
|
||||
onExecute(link);
|
||||
}
|
||||
};
|
||||
|
@ -36,11 +36,16 @@ TEST(SchedulerDynamicResourceManager, Smoke)
|
||||
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
ResourceGuard gA(cA->get("res1"), ResourceGuard::PostponeLocking);
|
||||
ResourceGuard gA(ResourceGuard::Metrics::getIOWrite(), cA->get("res1"), 1, ResourceGuard::Lock::Defer);
|
||||
gA.lock();
|
||||
gA.consume(1);
|
||||
gA.unlock();
|
||||
|
||||
ResourceGuard gB(cB->get("res1"));
|
||||
ResourceGuard gB(ResourceGuard::Metrics::getIOWrite(), cB->get("res1"));
|
||||
gB.unlock();
|
||||
|
||||
ResourceGuard gC(ResourceGuard::Metrics::getIORead(), cB->get("res1"));
|
||||
gB.consume(2);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,11 +1,13 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <Common/Scheduler/SchedulerRoot.h>
|
||||
|
||||
#include <Common/Scheduler/Nodes/tests/ResourceTest.h>
|
||||
|
||||
#include <Common/Scheduler/SchedulerRoot.h>
|
||||
#include <Common/randomSeed.h>
|
||||
|
||||
#include <barrier>
|
||||
#include <future>
|
||||
#include <pcg_random.hpp>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
@ -22,6 +24,17 @@ struct ResourceTest : public ResourceTestBase
|
||||
{
|
||||
scheduler.stop(true);
|
||||
}
|
||||
|
||||
std::mutex rng_mutex;
|
||||
pcg64 rng{randomSeed()};
|
||||
|
||||
template <typename T>
|
||||
T randomInt(T from, T to)
|
||||
{
|
||||
std::uniform_int_distribution<T> distribution(from, to);
|
||||
std::lock_guard lock(rng_mutex);
|
||||
return distribution(rng);
|
||||
}
|
||||
};
|
||||
|
||||
struct ResourceHolder
|
||||
@ -109,26 +122,55 @@ TEST(SchedulerRoot, Smoke)
|
||||
r2.registerResource();
|
||||
|
||||
{
|
||||
ResourceGuard rg(a);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a);
|
||||
EXPECT_TRUE(fc1->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(b);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), b);
|
||||
EXPECT_TRUE(fc1->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(c);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), c);
|
||||
EXPECT_TRUE(fc2->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
|
||||
{
|
||||
ResourceGuard rg(d);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), d);
|
||||
EXPECT_TRUE(fc2->requests.contains(&rg.request));
|
||||
rg.consume(1);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SchedulerRoot, Budget)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
ResourceHolder r1(t);
|
||||
r1.add<ConstraintTest>("/", "<max_requests>1</max_requests>");
|
||||
r1.add<PriorityPolicy>("/prio");
|
||||
auto a = r1.addQueue("/prio/A", "");
|
||||
r1.registerResource();
|
||||
|
||||
ResourceCost total_real_cost = 0;
|
||||
int total_requests = 10;
|
||||
for (int i = 0 ; i < total_requests; i++)
|
||||
{
|
||||
ResourceCost est_cost = t.randomInt(1, 10);
|
||||
ResourceCost real_cost = t.randomInt(0, 10);
|
||||
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a, est_cost);
|
||||
rg.consume(real_cost);
|
||||
total_real_cost += real_cost;
|
||||
}
|
||||
|
||||
EXPECT_EQ(total_requests, a.queue->dequeued_requests);
|
||||
EXPECT_EQ(total_real_cost, a.queue->dequeued_cost - a.queue->getBudget());
|
||||
}
|
||||
|
||||
TEST(SchedulerRoot, Cancel)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
@ -1,25 +0,0 @@
|
||||
#include <Common/Scheduler/ISchedulerQueue.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
#include <Common/Scheduler/ResourceRequest.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
void ResourceLink::adjust(ResourceCost estimated_cost, ResourceCost real_cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->adjustBudget(estimated_cost, real_cost);
|
||||
}
|
||||
|
||||
void ResourceLink::consumed(ResourceCost cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->consumeBudget(cost);
|
||||
}
|
||||
|
||||
void ResourceLink::accumulate(DB::ResourceCost cost) const
|
||||
{
|
||||
if (queue)
|
||||
queue->accumulateBudget(cost);
|
||||
}
|
||||
}
|
||||
|
@ -7,10 +7,30 @@
|
||||
#include <Common/Scheduler/ResourceRequest.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
|
||||
#include <Common/CurrentThread.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event SchedulerIOReadRequests;
|
||||
extern const Event SchedulerIOReadBytes;
|
||||
extern const Event SchedulerIOReadWaitMicroseconds;
|
||||
extern const Event SchedulerIOWriteRequests;
|
||||
extern const Event SchedulerIOWriteBytes;
|
||||
extern const Event SchedulerIOWriteWaitMicroseconds;
|
||||
}
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric SchedulerIOReadScheduled;
|
||||
extern const Metric SchedulerIOWriteScheduled;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -22,12 +42,42 @@ namespace DB
class ResourceGuard
{
public:
    enum ResourceGuardCtor
    enum class Lock
    {
        LockStraightAway, /// Locks inside constructor (default)
        Default, /// Locks inside constructor

        // WARNING: Only for tests. It is not exception-safe because `lock()` must be called after construction.
        PostponeLocking /// Don't lock in constructor, but send request
        Defer /// Don't lock in constructor, but send request
    };

    struct Metrics
    {
        const ProfileEvents::Event requests = ProfileEvents::end();
        const ProfileEvents::Event cost = ProfileEvents::end();
        const ProfileEvents::Event wait_microseconds = ProfileEvents::end();
        const CurrentMetrics::Metric scheduled_count = CurrentMetrics::end();

        static const Metrics * getIORead()
        {
            static Metrics metrics{
                .requests = ProfileEvents::SchedulerIOReadRequests,
                .cost = ProfileEvents::SchedulerIOReadBytes,
                .wait_microseconds = ProfileEvents::SchedulerIOReadWaitMicroseconds,
                .scheduled_count = CurrentMetrics::SchedulerIOReadScheduled
            };
            return &metrics;
        }

        static const Metrics * getIOWrite()
        {
            static Metrics metrics{
                .requests = ProfileEvents::SchedulerIOWriteRequests,
                .cost = ProfileEvents::SchedulerIOWriteBytes,
                .wait_microseconds = ProfileEvents::SchedulerIOWriteWaitMicroseconds,
                .scheduled_count = CurrentMetrics::SchedulerIOWriteScheduled
            };
            return &metrics;
        }
    };

    enum RequestState
@ -46,60 +96,74 @@ public:
|
||||
chassert(state == Finished);
|
||||
state = Enqueued;
|
||||
ResourceRequest::reset(cost_);
|
||||
link_.queue->enqueueRequestUsingBudget(this);
|
||||
estimated_cost = link_.queue->enqueueRequestUsingBudget(this); // NOTE: it modifies `cost` and enqueues request
|
||||
}
|
||||
|
||||
// This function is executed inside scheduler thread and wakes thread issued this `request`.
|
||||
// That thread will continue execution and do real consumption of requested resource synchronously.
|
||||
void execute() override
|
||||
{
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
chassert(state == Enqueued);
|
||||
state = Dequeued;
|
||||
}
|
||||
std::unique_lock lock(mutex);
|
||||
chassert(state == Enqueued);
|
||||
state = Dequeued;
|
||||
dequeued_cv.notify_one();
|
||||
}
|
||||
|
||||
void wait()
|
||||
{
|
||||
CurrentMetrics::Increment scheduled(metrics->scheduled_count);
|
||||
auto timer = CurrentThread::getProfileEvents().timer(metrics->wait_microseconds);
|
||||
std::unique_lock lock(mutex);
|
||||
dequeued_cv.wait(lock, [this] { return state == Dequeued; });
|
||||
}
|
||||
|
||||
void finish()
|
||||
void finish(ResourceCost real_cost_, ResourceLink link_)
|
||||
{
|
||||
// lock(mutex) is not required because `Dequeued` request cannot be used by the scheduler thread
|
||||
chassert(state == Dequeued);
|
||||
state = Finished;
|
||||
if (estimated_cost != real_cost_)
|
||||
link_.queue->adjustBudget(estimated_cost, real_cost_);
|
||||
ResourceRequest::finish();
|
||||
ProfileEvents::increment(metrics->requests);
|
||||
ProfileEvents::increment(metrics->cost, real_cost_);
|
||||
}
|
||||
|
||||
static Request & local()
|
||||
void assertFinished()
|
||||
{
|
||||
// lock(mutex) is not required because `Finished` request cannot be used by the scheduler thread
|
||||
chassert(state == Finished);
|
||||
}
|
||||
|
||||
static Request & local(const Metrics * metrics)
|
||||
{
|
||||
// Since single thread cannot use more than one resource request simultaneously,
|
||||
// we can reuse thread-local request to avoid allocations
|
||||
static thread_local Request instance;
|
||||
instance.metrics = metrics;
|
||||
return instance;
|
||||
}
|
||||
|
||||
const Metrics * metrics = nullptr; // Must be initialized before use
|
||||
|
||||
private:
|
||||
ResourceCost estimated_cost = 0; // Stores initial `cost` value in case budget was used to modify it
|
||||
std::mutex mutex;
|
||||
std::condition_variable dequeued_cv;
|
||||
RequestState state = Finished;
|
||||
};
|
||||
|
||||
/// Creates pending request for resource; blocks while resource is not available (unless `PostponeLocking`)
|
||||
explicit ResourceGuard(ResourceLink link_, ResourceCost cost = 1, ResourceGuardCtor ctor = LockStraightAway)
|
||||
/// Creates pending request for resource; blocks while resource is not available (unless `Lock::Defer`)
|
||||
explicit ResourceGuard(const Metrics * metrics, ResourceLink link_, ResourceCost cost = 1, ResourceGuard::Lock type = ResourceGuard::Lock::Default)
|
||||
: link(link_)
|
||||
, request(Request::local())
|
||||
, request(Request::local(metrics))
|
||||
{
|
||||
if (cost == 0)
|
||||
link.queue = nullptr; // Ignore zero-cost requests
|
||||
else if (link.queue)
|
||||
link.reset(); // Ignore zero-cost requests
|
||||
else if (link)
|
||||
{
|
||||
request.enqueue(cost, link);
|
||||
if (ctor == LockStraightAway)
|
||||
if (type == Lock::Default)
|
||||
request.wait();
|
||||
}
|
||||
}
|
||||
@ -112,22 +176,29 @@ public:
    /// Blocks until resource is available
    void lock()
    {
        if (link.queue)
        if (link)
            request.wait();
    }

    /// Report resource consumption has finished
    void unlock()
    void consume(ResourceCost cost)
    {
        if (link.queue)
        real_cost += cost;
    }

    /// Report resource consumption has finished
    void unlock(ResourceCost consumed = 0)
    {
        consume(consumed);
        if (link)
        {
            request.finish();
            link.queue = nullptr;
            request.finish(real_cost, link);
            link.reset();
        }
    }

    ResourceLink link;
    Request & request;
    ResourceCost real_cost = 0;
};

}

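To tie the pieces together, a minimal usage sketch of the reworked guard, assuming a `read_resource_link` already filled from the query's workload classifier. `readFromSocket()` is a hypothetical IO call standing in for the real buffer code that appears later in this diff; the guard and settings types are the ones shown above.

// Sketch: guard a single read with IO scheduling and report the real cost afterwards.
size_t scheduledRead(const ReadSettings & read_settings, char * buf, size_t max_bytes)
{
    ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, max_bytes);
    size_t bytes_read = readFromSocket(buf, max_bytes); // hypothetical IO call
    rlock.unlock(bytes_read); // report the real consumption; do not hold the resource under later throttling or locks
    return bytes_read;
}
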
@ -13,13 +13,28 @@ using ResourceCost = Int64;
struct ResourceLink
{
    ISchedulerQueue * queue = nullptr;

    bool operator==(const ResourceLink &) const = default;
    explicit operator bool() const { return queue != nullptr; }

    void adjust(ResourceCost estimated_cost, ResourceCost real_cost) const;
    void reset()
    {
        queue = nullptr;
    }
};

    void consumed(ResourceCost cost) const;
/*
 * Everything required for IO scheduling.
 * Note that raw pointers are stored inside, so make sure that the `ClassifierPtr` that produced
 * the resource links outlives them. Usually the classifier is stored in the query `Context`.
 */
struct IOSchedulingSettings
{
    ResourceLink read_resource_link;
    ResourceLink write_resource_link;

    void accumulate(ResourceCost cost) const;
    bool operator==(const IOSchedulingSettings &) const = default;
    explicit operator bool() const { return read_resource_link && write_resource_link; }
};

}

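For context, a hedged sketch of how these settings are meant to flow: resource links come from the query's workload classifier and are applied either explicitly per operation (via `ResourceGuard`) or ambiently for a block of code (via `CurrentThread::IOScope`, whose definition opens this section), so the HTTP session data hooks can pick them up. The surrounding function name is hypothetical; the individual calls appear elsewhere in this diff.

// Sketch: fill IOSchedulingSettings from the current query context and apply them ambiently.
void uploadWithIOScheduling(const WriteSettings & base_settings, const String & write_resource_name)
{
    WriteSettings settings = base_settings;
    if (auto query_context = CurrentThread::getQueryContext())
        settings.io_scheduling.write_resource_link = query_context->getWorkloadClassifier()->get(write_resource_name);

    CurrentThread::IOScope io_scope(settings.io_scheduling); // socket traffic within this scope goes through the scheduler
    // ... perform the upload; e.g. an S3 PutObject issued here is throttled via the session data hooks ...
}
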
@ -45,7 +45,7 @@ constexpr ResourceCost ResourceCostMax = std::numeric_limits<int>::max();
class ResourceRequest : public boost::intrusive::list_base_hook<>
{
public:
    /// Cost of request execution; should be filled before request enqueueing.
    /// Cost of request execution; should be filled before request enqueueing and remain constant until `finish()`.
    /// NOTE: If cost is not known in advance, ResourceBudget should be used (note that every ISchedulerQueue has it)
    ResourceCost cost;

@ -1,4 +1,5 @@
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
@ -27,6 +28,25 @@ namespace CurrentMetrics
|
||||
extern const Metric GlobalThreadScheduled;
|
||||
}
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event GlobalThreadPoolExpansions;
|
||||
extern const Event GlobalThreadPoolShrinks;
|
||||
extern const Event GlobalThreadPoolThreadCreationMicroseconds;
|
||||
extern const Event GlobalThreadPoolLockWaitMicroseconds;
|
||||
extern const Event GlobalThreadPoolJobs;
|
||||
extern const Event GlobalThreadPoolJobWaitTimeMicroseconds;
|
||||
|
||||
extern const Event LocalThreadPoolExpansions;
|
||||
extern const Event LocalThreadPoolShrinks;
|
||||
extern const Event LocalThreadPoolThreadCreationMicroseconds;
|
||||
extern const Event LocalThreadPoolLockWaitMicroseconds;
|
||||
extern const Event LocalThreadPoolJobs;
|
||||
extern const Event LocalThreadPoolBusyMicroseconds;
|
||||
extern const Event LocalThreadPoolJobWaitTimeMicroseconds;
|
||||
|
||||
}
|
||||
|
||||
class JobWithPriority
|
||||
{
|
||||
public:
|
||||
@ -40,6 +60,7 @@ public:
|
||||
/// Call stacks of all jobs' schedulings leading to this one
|
||||
std::vector<StackTrace::FramePointers> frame_pointers;
|
||||
bool enable_job_stack_trace = false;
|
||||
Stopwatch job_create_time;
|
||||
|
||||
JobWithPriority(
|
||||
Job job_, Priority priority_, CurrentMetrics::Metric metric,
|
||||
@ -59,6 +80,13 @@ public:
|
||||
{
|
||||
return priority > rhs.priority; // Reversed for `priority_queue` max-heap to yield minimum value (i.e. highest priority) first
|
||||
}
|
||||
|
||||
UInt64 elapsedMicroseconds() const
|
||||
{
|
||||
return job_create_time.elapsedMicroseconds();
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
static constexpr auto DEFAULT_THREAD_NAME = "ThreadPool";
|
||||
@ -180,14 +208,18 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
};
|
||||
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
|
||||
if (CannotAllocateThreadFaultInjector::injectFault())
|
||||
return on_error("fault injected");
|
||||
|
||||
auto pred = [this] { return !queue_size || scheduled_jobs < queue_size || shutdown; };
|
||||
|
||||
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set and the value is zero.
|
||||
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set. Even if the value is zero.
|
||||
{
|
||||
if (!job_finished.wait_for(lock, std::chrono::microseconds(*wait_microseconds), pred))
|
||||
return on_error(fmt::format("no free thread (timeout={})", *wait_microseconds));
|
||||
@ -216,7 +248,13 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
|
||||
try
|
||||
{
|
||||
Stopwatch watch2;
|
||||
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
|
||||
watch2.elapsedMicroseconds());
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -239,6 +277,8 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
|
||||
/// Wake up a free thread to run the new job.
|
||||
new_job_or_shutdown.notify_one();
|
||||
|
||||
ProfileEvents::increment(std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobs : ProfileEvents::LocalThreadPoolJobs);
|
||||
|
||||
return static_cast<ReturnType>(true);
|
||||
}
|
||||
|
||||
@ -262,7 +302,14 @@ void ThreadPoolImpl<Thread>::startNewThreadsNoLock()
|
||||
|
||||
try
|
||||
{
|
||||
Stopwatch watch;
|
||||
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
|
||||
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -293,7 +340,11 @@ void ThreadPoolImpl<Thread>::scheduleOrThrow(Job job, Priority priority, uint64_
|
||||
template <typename Thread>
|
||||
void ThreadPoolImpl<Thread>::wait()
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
/// Signal here just in case.
|
||||
/// If threads are waiting on condition variables, but there are some jobs in the queue
|
||||
/// then it will prevent us from deadlock.
|
||||
@ -334,7 +385,11 @@ void ThreadPoolImpl<Thread>::finalize()
|
||||
|
||||
/// Wait for all currently running jobs to finish (we don't wait for all scheduled jobs here like the function wait() does).
|
||||
for (auto & thread : threads)
|
||||
{
|
||||
thread.join();
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
|
||||
}
|
||||
|
||||
threads.clear();
|
||||
}
|
||||
@ -391,7 +446,11 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
std::optional<JobWithPriority> job_data;
|
||||
|
||||
{
|
||||
Stopwatch watch;
|
||||
std::unique_lock lock(mutex);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
|
||||
watch.elapsedMicroseconds());
|
||||
|
||||
// Finish with previous job if any
|
||||
if (job_is_done)
|
||||
@ -424,6 +483,8 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
{
|
||||
thread_it->detach();
|
||||
threads.erase(thread_it);
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -433,6 +494,10 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
job_data = std::move(const_cast<JobWithPriority &>(jobs.top()));
|
||||
jobs.pop();
|
||||
|
||||
ProfileEvents::increment(
|
||||
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobWaitTimeMicroseconds : ProfileEvents::LocalThreadPoolJobWaitTimeMicroseconds,
|
||||
job_data->elapsedMicroseconds());
|
||||
|
||||
/// We don't run jobs after `shutdown` is set, but we have to properly dequeue all jobs and finish them.
|
||||
if (shutdown)
|
||||
{
|
||||
@ -459,7 +524,22 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
|
||||
|
||||
CurrentMetrics::Increment metric_active_pool_threads(metric_active_threads);
|
||||
|
||||
job_data->job();
|
||||
if constexpr (!std::is_same_v<Thread, std::thread>)
|
||||
{
|
||||
Stopwatch watch;
|
||||
job_data->job();
|
||||
// This metric is less relevant for the global thread pool, as it would show large values (time while
|
||||
// a thread was used by local pools) and increment only when local pools are destroyed.
|
||||
//
|
||||
// In cases where global pool threads are used directly (without a local thread pool), distinguishing
|
||||
// them is difficult.
|
||||
ProfileEvents::increment(ProfileEvents::LocalThreadPoolBusyMicroseconds, watch.elapsedMicroseconds());
|
||||
}
|
||||
else
|
||||
{
|
||||
job_data->job();
|
||||
}
|
||||
|
||||
|
||||
if (thread_trace_context.root_span.isTraceEnabled())
|
||||
{
|
||||
|
@ -131,7 +131,7 @@ private:
|
||||
bool threads_remove_themselves = true;
|
||||
const bool shutdown_on_exception = true;
|
||||
|
||||
boost::heap::priority_queue<JobWithPriority> jobs;
|
||||
boost::heap::priority_queue<JobWithPriority,boost::heap::stable<true>> jobs;
|
||||
std::list<Thread> threads;
|
||||
std::exception_ptr first_exception;
|
||||
std::stack<OnDestroyCallback> on_destroy_callbacks;
|
||||
|
@ -7,11 +7,11 @@
|
||||
#include <Common/MemoryTracker.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/Scheduler/ResourceLink.h>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <unordered_set>
|
||||
@ -188,6 +188,10 @@ public:
|
||||
Progress progress_in;
|
||||
Progress progress_out;
|
||||
|
||||
/// IO scheduling
|
||||
ResourceLink read_resource_link;
|
||||
ResourceLink write_resource_link;
|
||||
|
||||
private:
|
||||
/// Group of threads, to which this thread attached
|
||||
ThreadGroupPtr thread_group;
|
||||
|
@ -210,7 +210,7 @@ namespace
|
||||
{
|
||||
UInt64 stringToMaxThreads(const String & str)
|
||||
{
|
||||
if (startsWith(str, "auto"))
|
||||
if (startsWith(str, "auto") || startsWith(str, "'auto"))
|
||||
return 0;
|
||||
return parseFromString<UInt64>(str);
|
||||
}
|
||||
@ -237,7 +237,8 @@ SettingFieldMaxThreads & SettingFieldMaxThreads::operator=(const Field & f)
|
||||
String SettingFieldMaxThreads::toString() const
|
||||
{
|
||||
if (is_auto)
|
||||
return "auto(" + ::DB::toString(value) + ")";
|
||||
/// Removing quotes here will introduce an incompatibility between replicas with different versions.
|
||||
return "'auto(" + ::DB::toString(value) + ")'";
|
||||
else
|
||||
return ::DB::toString(value);
|
||||
}
|
||||
|
@ -519,10 +519,10 @@ static DataTypePtr createJSON(const ASTPtr & arguments)
|
||||
if (!context)
|
||||
context = Context::getGlobalContextInstance();
|
||||
|
||||
if (context->getSettingsRef().use_json_alias_for_old_object_type)
|
||||
if (context->getSettingsRef().allow_experimental_object_type && context->getSettingsRef().use_json_alias_for_old_object_type)
|
||||
{
|
||||
if (arguments && !arguments->children.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set setting allow_experimental_json_type = 1");
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set settings allow_experimental_json_type = 1 and use_json_alias_for_old_object_type = 0");
|
||||
|
||||
return std::make_shared<DataTypeObjectDeprecated>("JSON", false);
|
||||
}
|
||||
|
@ -35,9 +35,10 @@ class RegionsNames
|
||||
M(et, ru, 11) \
|
||||
M(pt, en, 12) \
|
||||
M(he, en, 13) \
|
||||
M(vi, en, 14)
|
||||
M(vi, en, 14) \
|
||||
M(es, en, 15)
|
||||
|
||||
static constexpr size_t total_languages = 15;
|
||||
static constexpr size_t total_languages = 16;
|
||||
|
||||
public:
|
||||
enum class Language : size_t
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Throttler.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <base/sleep.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <IO/SeekableReadBuffer.h>
|
||||
@ -113,7 +114,9 @@ bool ReadBufferFromAzureBlobStorage::nextImpl()
|
||||
{
|
||||
try
|
||||
{
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, to_read_bytes);
|
||||
bytes_read = data_stream->ReadToCount(reinterpret_cast<uint8_t *>(data_ptr), to_read_bytes);
|
||||
rlock.unlock(bytes_read); // Do not hold resource under bandwidth throttler
|
||||
if (read_settings.remote_throttler)
|
||||
read_settings.remote_throttler->add(bytes_read, ProfileEvents::RemoteReadThrottlerBytes, ProfileEvents::RemoteReadThrottlerSleepMicroseconds);
|
||||
break;
|
||||
|
@ -101,15 +101,13 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
|
||||
{
|
||||
try
|
||||
{
|
||||
ResourceGuard rlock(write_settings.resource_link, cost); // Note that zero-cost requests are ignored
|
||||
ResourceGuard rlock(ResourceGuard::Metrics::getIOWrite(), write_settings.io_scheduling.write_resource_link, cost); // Note that zero-cost requests are ignored
|
||||
func();
|
||||
rlock.unlock(cost);
|
||||
break;
|
||||
}
|
||||
catch (const Azure::Core::RequestFailedException & e)
|
||||
{
|
||||
if (cost)
|
||||
write_settings.resource_link.accumulate(cost); // Accumulate resource for later use, because we have failed to consume it
|
||||
|
||||
if (i == num_tries - 1 || !isRetryableAzureException(e))
|
||||
throw;
|
||||
|
||||
@ -117,8 +115,6 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (cost)
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
@ -461,14 +461,17 @@ DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
|
||||
}
|
||||
|
||||
template <class Settings>
|
||||
static inline Settings updateResourceLink(const Settings & settings, const String & resource_name)
|
||||
static inline Settings updateIOSchedulingSettings(const Settings & settings, const String & read_resource_name, const String & write_resource_name)
|
||||
{
|
||||
if (resource_name.empty())
|
||||
if (read_resource_name.empty() && write_resource_name.empty())
|
||||
return settings;
|
||||
if (auto query_context = CurrentThread::getQueryContext())
|
||||
{
|
||||
Settings result(settings);
|
||||
result.resource_link = query_context->getWorkloadClassifier()->get(resource_name);
|
||||
if (!read_resource_name.empty())
|
||||
result.io_scheduling.read_resource_link = query_context->getWorkloadClassifier()->get(read_resource_name);
|
||||
if (!write_resource_name.empty())
|
||||
result.io_scheduling.write_resource_link = query_context->getWorkloadClassifier()->get(write_resource_name);
|
||||
return result;
|
||||
}
|
||||
return settings;
|
||||
@ -500,7 +503,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
|
||||
|
||||
return object_storage->readObjects(
|
||||
storage_objects,
|
||||
updateResourceLink(settings, getReadResourceName()),
|
||||
updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName()),
|
||||
read_hint,
|
||||
file_size);
|
||||
}
|
||||
@ -513,7 +516,7 @@ std::unique_ptr<WriteBufferFromFileBase> DiskObjectStorage::writeFile(
|
||||
{
|
||||
LOG_TEST(log, "Write file: {}", path);
|
||||
|
||||
WriteSettings write_settings = updateResourceLink(settings, getWriteResourceName());
|
||||
WriteSettings write_settings = updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName());
|
||||
auto transaction = createObjectStorageTransaction();
|
||||
return transaction->writeFile(path, buf_size, mode, write_settings);
|
||||
}
|
||||
|
@ -406,7 +406,7 @@ void UserDefinedSQLObjectsZooKeeperStorage::syncObjects(const zkutil::ZooKeeperP
|
||||
LOG_DEBUG(log, "Syncing user-defined {} objects", object_type);
|
||||
Strings object_names = getObjectNamesAndSetWatch(zookeeper, object_type);
|
||||
|
||||
getLock();
|
||||
auto lock = getLock();
|
||||
|
||||
/// Remove stale objects
|
||||
removeAllObjectsExcept(object_names);
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDate32.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeInterval.h>
|
||||
#include <Formats/FormatSettings.h>
|
||||
@ -43,6 +44,7 @@ public:
|
||||
enum ResultType
|
||||
{
|
||||
Date,
|
||||
Date32,
|
||||
DateTime,
|
||||
DateTime64,
|
||||
};
|
||||
@ -75,15 +77,15 @@ public:
|
||||
|
||||
bool second_argument_is_date = false;
|
||||
auto check_second_argument = [&] {
|
||||
if (!isDate(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
|
||||
if (!isDateOrDate32(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2nd argument of function {}. "
|
||||
"Should be a date or a date with time", arguments[1].type->getName(), getName());
|
||||
|
||||
second_argument_is_date = isDate(arguments[1].type);
|
||||
second_argument_is_date = isDateOrDate32(arguments[1].type);
|
||||
|
||||
if (second_argument_is_date && ((datepart_kind == IntervalKind::Kind::Hour)
|
||||
|| (datepart_kind == IntervalKind::Kind::Minute) || (datepart_kind == IntervalKind::Kind::Second)))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type Date of argument for function {}", getName());
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for function {}", arguments[1].type->getName(), getName());
|
||||
};
|
||||
|
||||
auto check_timezone_argument = [&] {
|
||||
@ -119,6 +121,8 @@ public:
|
||||
|
||||
if (result_type == ResultType::Date)
|
||||
return std::make_shared<DataTypeDate>();
|
||||
if (result_type == ResultType::Date32)
|
||||
return std::make_shared<DataTypeDate32>();
|
||||
else if (result_type == ResultType::DateTime)
|
||||
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 1, false));
|
||||
else
|
||||
|
@ -44,9 +44,9 @@ public:
|
||||
auto check_first_argument = [&]
|
||||
{
|
||||
const DataTypePtr & type_arg1 = arguments[0].type;
|
||||
if (!isDate(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
|
||||
if (!isDateOrDate32(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of 1st argument of function {}, expected a Date, DateTime or DateTime64",
|
||||
"Illegal type {} of 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64",
|
||||
type_arg1->getName(), getName());
|
||||
value_is_date = isDate(type_arg1);
|
||||
};
|
||||
@ -56,6 +56,7 @@ public:
|
||||
enum class ResultType : uint8_t
|
||||
{
|
||||
Date,
|
||||
Date32,
|
||||
DateTime,
|
||||
DateTime64
|
||||
};
|
||||
@ -128,6 +129,8 @@ public:
|
||||
{
|
||||
case ResultType::Date:
|
||||
return std::make_shared<DataTypeDate>();
|
||||
case ResultType::Date32:
|
||||
return std::make_shared<DataTypeDate32>();
|
||||
case ResultType::DateTime:
|
||||
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
|
||||
case ResultType::DateTime64:
|
||||
@ -185,7 +188,13 @@ private:
|
||||
if (time_column_vec)
|
||||
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
|
||||
}
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, DateTime or DateTime64", getName());
|
||||
else if (isDate32(time_column_type))
|
||||
{
|
||||
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
|
||||
if (time_column_vec)
|
||||
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
|
||||
}
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
|
||||
}
|
||||
|
||||
template <typename TimeDataType, typename TimeColumnType>
|
||||
|
@ -91,6 +91,8 @@ void ProgressValues::writeJSON(WriteBuffer & out) const
|
||||
writeText(result_bytes, out);
|
||||
writeCString("\",\"elapsed_ns\":\"", out);
|
||||
writeText(elapsed_ns, out);
|
||||
writeCString("\",\"real_time_microseconds\":\"", out);
|
||||
writeText(real_time_microseconds, out);
|
||||
writeCString("\"", out);
|
||||
writeCString("}", out);
|
||||
}
|
||||
@ -110,6 +112,7 @@ bool Progress::incrementPiecewiseAtomically(const Progress & rhs)
|
||||
result_bytes += rhs.result_bytes;
|
||||
|
||||
elapsed_ns += rhs.elapsed_ns;
|
||||
real_time_microseconds += rhs.real_time_microseconds;
|
||||
|
||||
return rhs.read_rows || rhs.written_rows;
|
||||
}
|
||||
@ -129,6 +132,7 @@ void Progress::reset()
|
||||
result_bytes = 0;
|
||||
|
||||
elapsed_ns = 0;
|
||||
real_time_microseconds = 0;
|
||||
}
|
||||
|
||||
ProgressValues Progress::getValues() const
|
||||
@ -148,6 +152,7 @@ ProgressValues Progress::getValues() const
|
||||
res.result_bytes = result_bytes.load(std::memory_order_relaxed);
|
||||
|
||||
res.elapsed_ns = elapsed_ns.load(std::memory_order_relaxed);
|
||||
res.real_time_microseconds = real_time_microseconds.load(std::memory_order_relaxed);
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -169,6 +174,7 @@ ProgressValues Progress::fetchValuesAndResetPiecewiseAtomically()
|
||||
res.result_bytes = result_bytes.fetch_and(0);
|
||||
|
||||
res.elapsed_ns = elapsed_ns.fetch_and(0);
|
||||
res.real_time_microseconds = real_time_microseconds.fetch_and(0);
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -190,6 +196,7 @@ Progress Progress::fetchAndResetPiecewiseAtomically()
|
||||
res.result_bytes = result_bytes.fetch_and(0);
|
||||
|
||||
res.elapsed_ns = elapsed_ns.fetch_and(0);
|
||||
res.real_time_microseconds = real_time_microseconds.fetch_and(0);
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -209,6 +216,7 @@ Progress & Progress::operator=(Progress && other) noexcept
|
||||
result_bytes = other.result_bytes.load(std::memory_order_relaxed);
|
||||
|
||||
elapsed_ns = other.elapsed_ns.load(std::memory_order_relaxed);
|
||||
real_time_microseconds = other.real_time_microseconds.load(std::memory_order_relaxed);
|
||||
|
||||
return *this;
|
||||
}
|
||||
@ -244,4 +252,9 @@ void Progress::incrementElapsedNs(UInt64 elapsed_ns_)
|
||||
elapsed_ns.fetch_add(elapsed_ns_, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void Progress::incrementRealTimeMicroseconds(UInt64 microseconds)
|
||||
{
|
||||
real_time_microseconds.fetch_add(microseconds, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ struct ProgressValues
|
||||
UInt64 result_bytes = 0;
|
||||
|
||||
UInt64 elapsed_ns = 0;
|
||||
UInt64 real_time_microseconds = 0;
|
||||
|
||||
void read(ReadBuffer & in, UInt64 server_revision);
|
||||
void write(WriteBuffer & out, UInt64 client_revision) const;
|
||||
@ -40,6 +41,7 @@ struct ReadProgress
|
||||
UInt64 read_bytes = 0;
|
||||
UInt64 total_rows_to_read = 0;
|
||||
UInt64 total_bytes_to_read = 0;
|
||||
UInt64 real_time_microseconds = 0;
|
||||
|
||||
ReadProgress(UInt64 read_rows_, UInt64 read_bytes_, UInt64 total_rows_to_read_ = 0, UInt64 total_bytes_to_read_ = 0)
|
||||
: read_rows(read_rows_), read_bytes(read_bytes_), total_rows_to_read(total_rows_to_read_), total_bytes_to_read(total_bytes_to_read_) {}
|
||||
@ -96,6 +98,8 @@ struct Progress
|
||||
|
||||
std::atomic<UInt64> elapsed_ns {0};
|
||||
|
||||
std::atomic<UInt64> real_time_microseconds {0};
|
||||
|
||||
Progress() = default;
|
||||
|
||||
Progress(UInt64 read_rows_, UInt64 read_bytes_, UInt64 total_rows_to_read_ = 0, UInt64 total_bytes_to_read_ = 0)
|
||||
@ -125,6 +129,8 @@ struct Progress
|
||||
|
||||
void incrementElapsedNs(UInt64 elapsed_ns_);
|
||||
|
||||
void incrementRealTimeMicroseconds(UInt64 microseconds);
|
||||
|
||||
void reset();
|
||||
|
||||
ProgressValues getValues() const;
|
||||
|
@ -6,7 +6,6 @@
|
||||
|
||||
#include <IO/ReadBufferFromIStream.h>
|
||||
#include <IO/ReadBufferFromS3.h>
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <IO/S3/getObjectInfo.h>
|
||||
#include <IO/S3/Requests.h>
|
||||
|
||||
@ -423,22 +422,13 @@ Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t attempt, si
|
||||
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::ReadBufferFromS3InitMicroseconds);
|
||||
|
||||
// We do not know in advance how many bytes we are going to consume, to avoid blocking estimated it from below
|
||||
constexpr ResourceCost estimated_cost = 1;
|
||||
ResourceGuard rlock(read_settings.resource_link, estimated_cost);
|
||||
|
||||
CurrentThread::IOScope io_scope(read_settings.io_scheduling);
|
||||
Aws::S3::Model::GetObjectOutcome outcome = client_ptr->GetObject(req);
|
||||
|
||||
rlock.unlock();
|
||||
|
||||
if (outcome.IsSuccess())
|
||||
{
|
||||
ResourceCost bytes_read = outcome.GetResult().GetContentLength();
|
||||
read_settings.resource_link.adjust(estimated_cost, bytes_read);
|
||||
return outcome.GetResultWithOwnership();
|
||||
}
|
||||
else
|
||||
{
|
||||
read_settings.resource_link.accumulate(estimated_cost);
|
||||
const auto & error = outcome.GetError();
|
||||
throw S3Exception(error.GetMessage(), error.GetErrorType());
|
||||
}
|
||||
|
@ -118,8 +118,7 @@ struct ReadSettings
|
||||
ThrottlerPtr remote_throttler;
|
||||
ThrottlerPtr local_throttler;
|
||||
|
||||
// Resource to be used during reading
|
||||
ResourceLink resource_link;
|
||||
IOSchedulingSettings io_scheduling;
|
||||
|
||||
size_t http_max_tries = 10;
|
||||
size_t http_retry_initial_backoff_ms = 100;
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <Common/Throttler.h>
|
||||
#include <Interpreters/Cache/FileCache.h>
|
||||
|
||||
#include <Common/Scheduler/ResourceGuard.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/S3Common.h>
|
||||
#include <IO/S3/Requests.h>
|
||||
@ -558,12 +557,11 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
|
||||
|
||||
auto & request = std::get<0>(*worker_data);
|
||||
|
||||
ResourceCost cost = request.GetContentLength();
|
||||
ResourceGuard rlock(write_settings.resource_link, cost);
|
||||
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
|
||||
|
||||
Stopwatch watch;
|
||||
auto outcome = client_ptr->UploadPart(request);
|
||||
watch.stop();
|
||||
rlock.unlock(); // Avoid acquiring other locks under resource lock
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
|
||||
|
||||
@ -577,7 +575,6 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
|
||||
if (!outcome.IsSuccess())
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
|
||||
}
|
||||
|
||||
@ -715,12 +712,11 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
|
||||
if (client_ptr->isClientForDisk())
|
||||
ProfileEvents::increment(ProfileEvents::DiskS3PutObject);
|
||||
|
||||
ResourceCost cost = request.GetContentLength();
|
||||
ResourceGuard rlock(write_settings.resource_link, cost);
|
||||
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
|
||||
|
||||
Stopwatch watch;
|
||||
auto outcome = client_ptr->PutObject(request);
|
||||
watch.stop();
|
||||
rlock.unlock();
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
|
||||
if (blob_log)
|
||||
@ -734,7 +730,6 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
|
||||
}
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
|
||||
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
|
||||
|
||||
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
|
||||
{
|
||||
|
@ -13,8 +13,7 @@ struct WriteSettings
|
||||
ThrottlerPtr remote_throttler;
|
||||
ThrottlerPtr local_throttler;
|
||||
|
||||
// Resource to be used during reading
|
||||
ResourceLink resource_link;
|
||||
IOSchedulingSettings io_scheduling;
|
||||
|
||||
/// Filesystem cache settings
|
||||
bool enable_filesystem_cache_on_write_operations = false;
|
||||
|
@ -1009,8 +1009,14 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
        size_t num_rows = executor.execute(*buffer);

        total_rows += num_rows;
        chunk_info->offsets.push_back(total_rows);
        chunk_info->tokens.push_back(entry->async_dedup_token);
        /// For some reason, the client can pass zero rows and bytes to the server.
        /// We don't update offsets in this case, because we assume every insert has some rows during dedup,
        /// but we have nothing to deduplicate for this insert.
        if (num_rows > 0)
        {
            chunk_info->offsets.push_back(total_rows);
            chunk_info->tokens.push_back(entry->async_dedup_token);
        }

        add_to_async_insert_log(entry, query_for_logging, current_exception, num_rows, num_bytes, data->timeout_ms);

@ -1061,8 +1067,14 @@ Chunk AsynchronousInsertQueue::processPreprocessedEntries(
            result_columns[i]->insertRangeFrom(*columns[i], 0, columns[i]->size());

        total_rows += block->rows();
        chunk_info->offsets.push_back(total_rows);
        chunk_info->tokens.push_back(entry->async_dedup_token);
        /// For some reason, the client can pass zero rows and bytes to the server.
        /// We don't update offsets in this case, because we assume every insert has some rows during dedup,
        /// but we have nothing to deduplicate for this insert.
        if (block->rows())
        {
            chunk_info->offsets.push_back(total_rows);
            chunk_info->tokens.push_back(entry->async_dedup_token);
        }

        const auto & query_for_logging = get_query_by_format(entry->format);
        add_to_async_insert_log(entry, query_for_logging, "", block->rows(), block->bytes(), data->timeout_ms);

@ -821,6 +821,19 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
|
||||
{
|
||||
properties.indices = as_storage_metadata->getSecondaryIndices();
|
||||
properties.projections = as_storage_metadata->getProjections().clone();
|
||||
|
||||
/// CREATE TABLE AS should copy PRIMARY KEY, ORDER BY, and similar clauses.
|
||||
if (!create.storage->primary_key && as_storage_metadata->isPrimaryKeyDefined() && as_storage_metadata->hasPrimaryKey())
|
||||
create.storage->set(create.storage->primary_key, as_storage_metadata->getPrimaryKeyAST()->clone());
|
||||
|
||||
if (!create.storage->partition_by && as_storage_metadata->isPartitionKeyDefined() && as_storage_metadata->hasPartitionKey())
|
||||
create.storage->set(create.storage->partition_by, as_storage_metadata->getPartitionKeyAST()->clone());
|
||||
|
||||
if (!create.storage->order_by && as_storage_metadata->isSortingKeyDefined() && as_storage_metadata->hasSortingKey())
|
||||
create.storage->set(create.storage->order_by, as_storage_metadata->getSortingKeyAST()->clone());
|
||||
|
||||
if (!create.storage->sample_by && as_storage_metadata->isSamplingKeyDefined() && as_storage_metadata->hasSamplingKey())
|
||||
create.storage->set(create.storage->sample_by, as_storage_metadata->getSamplingKeyAST()->clone());
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -107,7 +107,9 @@ BlockIO InterpreterDeleteQuery::execute()
|
||||
String alter_query =
|
||||
"ALTER TABLE " + table->getStorageID().getFullTableName()
|
||||
+ (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster))
|
||||
+ " UPDATE `_row_exists` = 0 WHERE " + serializeAST(*delete_query.predicate);
|
||||
+ " UPDATE `_row_exists` = 0"
|
||||
+ (delete_query.partition ? " IN PARTITION " + serializeAST(*delete_query.partition) : "")
|
||||
+ " WHERE " + serializeAST(*delete_query.predicate);
|
||||
|
||||
ParserAlterQuery parser;
|
||||
ASTPtr alter_ast = parseQuery(
|
||||
|
@ -90,6 +90,7 @@ namespace ProfileEvents
|
||||
extern const Event SelectQueryTimeMicroseconds;
|
||||
extern const Event InsertQueryTimeMicroseconds;
|
||||
extern const Event OtherQueryTimeMicroseconds;
|
||||
extern const Event RealTimeMicroseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
@ -398,9 +399,14 @@ void logQueryFinish(
|
||||
/// Update performance counters before logging to query_log
|
||||
CurrentThread::finalizePerformanceCounters();
|
||||
|
||||
QueryStatusInfo info = process_list_elem->getInfo(true, context->getSettingsRef().log_profile_events);
|
||||
elem.type = QueryLogElementType::QUERY_FINISH;
|
||||
std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters;
|
||||
QueryStatusInfo info = process_list_elem->getInfo(true, true);
|
||||
if (context->getSettingsRef().log_profile_events)
|
||||
profile_counters = info.profile_counters;
|
||||
else
|
||||
profile_counters.swap(info.profile_counters);
|
||||
|
||||
elem.type = QueryLogElementType::QUERY_FINISH;
|
||||
addStatusInfoToQueryLogElement(elem, info, query_ast, context);
|
||||
|
||||
if (pulling_pipeline)
|
||||
@ -419,6 +425,7 @@ void logQueryFinish(
|
||||
{
|
||||
Progress p;
|
||||
p.incrementPiecewiseAtomically(Progress{ResultProgress{elem.result_rows, elem.result_bytes}});
|
||||
p.incrementRealTimeMicroseconds((*profile_counters)[ProfileEvents::RealTimeMicroseconds]);
|
||||
progress_callback(p);
|
||||
}
|
||||
|
||||
|
@@ -45,6 +45,12 @@ void ASTDeleteQuery::formatQueryImpl(const FormatSettings & settings, FormatStat

    formatOnCluster(settings);

    if (partition)
    {
        settings.ostr << (settings.hilite ? hilite_keyword : "") << " IN PARTITION " << (settings.hilite ? hilite_none : "");
        partition->formatImpl(settings, state, frame);
    }

    settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
    predicate->formatImpl(settings, state, frame);
}
@@ -19,6 +19,11 @@ public:
        return removeOnCluster<ASTDeleteQuery>(clone(), params.default_database);
    }

    /** Used in DELETE FROM queries.
      * The value or ID of the partition is stored here.
      */
    ASTPtr partition;

    ASTPtr predicate;

protected:
@@ -3,6 +3,7 @@
#include <Parsers/parseDatabaseAndTableName.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/ParserSetQuery.h>
#include <Parsers/ParserPartition.h>


namespace DB
@@ -15,11 +16,14 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

    ParserKeyword s_delete(Keyword::DELETE);
    ParserKeyword s_from(Keyword::FROM);
    ParserKeyword s_in_partition(Keyword::IN_PARTITION);
    ParserKeyword s_where(Keyword::WHERE);
    ParserExpression parser_exp_elem;
    ParserKeyword s_settings(Keyword::SETTINGS);
    ParserKeyword s_on{Keyword::ON};

    ParserPartition parser_partition;

    if (s_delete.ignore(pos, expected))
    {
        if (!s_from.ignore(pos, expected))
@@ -36,6 +40,12 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
            query->cluster = cluster_str;
        }

        if (s_in_partition.ignore(pos, expected))
        {
            if (!parser_partition.parse(pos, query->partition, expected))
                return false;
        }

        if (!s_where.ignore(pos, expected))
            return false;

@@ -53,6 +63,9 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    else
        return false;

    if (query->partition)
        query->children.push_back(query->partition);

    if (query->predicate)
        query->children.push_back(query->predicate);
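For context, a minimal SQL sketch of the syntax these parser and AST changes enable, adapted from the lightweight-delete test later in this diff (table name t is illustrative):

    CREATE TABLE t (time Date, id String, name String) ENGINE = MergeTree() PARTITION BY time ORDER BY id;
    INSERT INTO t SELECT '2024-08-01', '1', toString(number) FROM numbers(100);
    -- Lightweight DELETE limited to one partition; rows in other partitions are left untouched.
    DELETE FROM t IN PARTITION '2024-08-01' WHERE id = '1';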
@@ -494,6 +494,12 @@ JoinClausesAndActions buildJoinClausesAndActions(
        necessary_names.push_back(name);
    };

    bool is_join_with_special_storage = false;
    if (const auto * right_table_node = join_node.getRightTableExpression()->as<TableNode>())
    {
        is_join_with_special_storage = dynamic_cast<const StorageJoin *>(right_table_node->getStorage().get());
    }

    for (auto & join_clause : result.join_clauses)
    {
        const auto & left_filter_condition_nodes = join_clause.getLeftFilterConditionNodes();
@@ -561,7 +567,7 @@ JoinClausesAndActions buildJoinClausesAndActions(
            if (!left_key_node->result_type->equals(*common_type))
                left_key_node = &left_join_actions.addCast(*left_key_node, common_type, {});

            if (!right_key_node->result_type->equals(*common_type))
            if (!is_join_with_special_storage && !right_key_node->result_type->equals(*common_type))
                right_key_node = &right_join_actions.addCast(*right_key_node, common_type, {});
        }
@@ -50,6 +50,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
        expr_node.step = std::make_unique<ExpressionStep>(
            expr_node.children.front()->step->getOutputStream(),
            expression->getExpression().clone());
        expr_node.step->setStepDescription(expression->getStepDescription());
    }

    /// - Expression - Something
@@ -1245,6 +1245,13 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
{
    auto columns = metadata.columns;

    auto ast_to_str = [](const ASTPtr & query) -> String
    {
        if (!query)
            return "";
        return queryToString(query);
    };

    for (size_t i = 0; i < size(); ++i)
    {
        auto & command = (*this)[i];
@@ -1277,6 +1284,11 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
            if (!has_column && command.if_exists)
                command.ignore = true;
        }
        else if (command.type == AlterCommand::MODIFY_ORDER_BY)
        {
            if (ast_to_str(command.order_by) == ast_to_str(metadata.sorting_key.definition_ast))
                command.ignore = true;
        }
    }

    prepared = true;
@@ -119,27 +119,16 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
            return false;
        }

        ResourceGuard rlock(read_settings.resource_link, num_bytes_to_read);
        int bytes_read;
        try
        {
            bytes_read = hdfsRead(fs.get(), fin, internal_buffer.begin(), safe_cast<int>(num_bytes_to_read));
        }
        catch (...)
        {
            read_settings.resource_link.accumulate(num_bytes_to_read); // We assume no resource was used in case of failure
            throw;
        }
        rlock.unlock();
        ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, num_bytes_to_read);
        int bytes_read = hdfsRead(fs.get(), fin, internal_buffer.begin(), safe_cast<int>(num_bytes_to_read));
        rlock.unlock(std::max(0, bytes_read));

        if (bytes_read < 0)
        {
            read_settings.resource_link.accumulate(num_bytes_to_read); // We assume no resource was used in case of failure
            throw Exception(ErrorCodes::NETWORK_ERROR,
                "Fail to read from HDFS: {}, file path: {}. Error: {}",
                hdfs_uri, hdfs_file_path, std::string(hdfsGetLastError()));
        }
        read_settings.resource_link.adjust(num_bytes_to_read, bytes_read);

        if (bytes_read)
        {
@@ -66,25 +66,12 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl

    int write(const char * start, size_t size)
    {
        ResourceGuard rlock(write_settings.resource_link, size);
        int bytes_written;
        try
        {
            bytes_written = hdfsWrite(fs.get(), fout, start, safe_cast<int>(size));
        }
        catch (...)
        {
            write_settings.resource_link.accumulate(size); // We assume no resource was used in case of failure
            throw;
        }
        rlock.unlock();
        ResourceGuard rlock(ResourceGuard::Metrics::getIOWrite(), write_settings.io_scheduling.write_resource_link, size);
        int bytes_written = hdfsWrite(fs.get(), fout, start, safe_cast<int>(size));
        rlock.unlock(std::max(0, bytes_written));

        if (bytes_written < 0)
        {
            write_settings.resource_link.accumulate(size); // We assume no resource was used in case of failure
            throw Exception(ErrorCodes::NETWORK_ERROR, "Fail to write HDFS file: {} {}", hdfs_uri, std::string(hdfsGetLastError()));
        }
        write_settings.resource_link.adjust(size, bytes_written);

        if (write_settings.remote_throttler)
            write_settings.remote_throttler->add(bytes_written, ProfileEvents::RemoteWriteThrottlerBytes, ProfileEvents::RemoteWriteThrottlerSleepMicroseconds);
@@ -126,6 +126,7 @@ void listFilesWithRegexpMatchingImpl(
            /// Otherwise it will not allow to work with symlinks in `user_files_path` directory.
            fs::canonical(path_for_ls + for_match);
            fs::path absolute_path = fs::absolute(path_for_ls + for_match);
            absolute_path = absolute_path.lexically_normal(); /// ensure that the resulting path is normalized (e.g., removes any redundant slashes or . and .. segments)
            result.push_back(absolute_path.string());
        }
        catch (const std::exception &) // NOLINT
@@ -221,14 +221,17 @@ void StorageReplicatedMergeTree::setZooKeeper()
    /// strange effects. So we always use only one session for all tables.
    /// (excluding auxiliary zookeepers)

    std::lock_guard lock(current_zookeeper_mutex);
    if (zookeeper_name == default_zookeeper_name)
    {
        current_zookeeper = getContext()->getZooKeeper();
        auto new_keeper = getContext()->getZooKeeper();
        std::lock_guard lock(current_zookeeper_mutex);
        current_zookeeper = new_keeper;
    }
    else
    {
        current_zookeeper = getContext()->getAuxiliaryZooKeeper(zookeeper_name);
        auto new_keeper = getContext()->getAuxiliaryZooKeeper(zookeeper_name);
        std::lock_guard lock(current_zookeeper_mutex);
        current_zookeeper = new_keeper;
    }
}

@@ -365,7 +368,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
    bool has_zookeeper = getContext()->hasZooKeeper() || getContext()->hasAuxiliaryZooKeeper(zookeeper_name);
    if (has_zookeeper)
    {
        /// It's possible for getZooKeeper() to timeout if zookeeper host(s) can't
        /// It's possible for getZooKeeper() to timeout if zookeeper host(s) can't
        /// be reached. In such cases Poco::Exception is thrown after a connection
        /// timeout - refer to src/Common/ZooKeeper/ZooKeeperImpl.cpp:866 for more info.
        ///
@@ -288,7 +288,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri
    # Normalize bug fixes
    if (
        re.match(
            r".*(?i)bug\Wfix",
            r"(?i).*bug\Wfix",
            category,
        )
        # Map "Critical Bug Fix" to "Bug fix" category for changelog
@@ -5,7 +5,7 @@ networks:
    enable_ipv6: true
    ipam:
      config:
        - subnet: 10.5.0.0/12
          gateway: 10.5.1.1
        - subnet: 10.0.0.0/12
          gateway: 10.0.0.1
        - subnet: 2001:3984:3989::/64
          gateway: 2001:3984:3989::1
@ -67,6 +67,11 @@ def test_aggregate_states(start_cluster):
|
||||
f"select hex(initializeAggregation('{function_name}State', 'foo'))"
|
||||
).strip()
|
||||
|
||||
def get_final_value_unhex(node, function_name, value):
|
||||
return node.query(
|
||||
f"select finalizeAggregation(unhex('{value}')::AggregateFunction({function_name}, String))"
|
||||
).strip()
|
||||
|
||||
for aggregate_function in aggregate_functions:
|
||||
logging.info("Checking %s", aggregate_function)
|
||||
|
||||
@ -99,13 +104,39 @@ def test_aggregate_states(start_cluster):
|
||||
|
||||
upstream_state = get_aggregate_state_hex(upstream, aggregate_function)
|
||||
if upstream_state != backward_state:
|
||||
logging.info(
|
||||
"Failed %s, %s (backward) != %s (upstream)",
|
||||
aggregate_function,
|
||||
backward_state,
|
||||
upstream_state,
|
||||
)
|
||||
failed += 1
|
||||
allowed_changes_if_result_is_the_same = ["anyHeavy"]
|
||||
|
||||
if aggregate_function in allowed_changes_if_result_is_the_same:
|
||||
backward_final_from_upstream = get_final_value_unhex(
|
||||
backward, aggregate_function, upstream_state
|
||||
)
|
||||
upstream_final_from_backward = get_final_value_unhex(
|
||||
upstream, aggregate_function, backward_state
|
||||
)
|
||||
|
||||
if backward_final_from_upstream == upstream_final_from_backward:
|
||||
logging.info(
|
||||
"OK %s (but different intermediate states)", aggregate_function
|
||||
)
|
||||
passed += 1
|
||||
else:
|
||||
logging.error(
|
||||
"Failed %s, Intermediate: %s (backward) != %s (upstream). Final from intermediate: %s (backward from upstream state) != %s (upstream from backward state)",
|
||||
aggregate_function,
|
||||
backward_state,
|
||||
upstream_state,
|
||||
backward_final_from_upstream,
|
||||
upstream_final_from_backward,
|
||||
)
|
||||
failed += 1
|
||||
else:
|
||||
logging.error(
|
||||
"Failed %s, %s (backward) != %s (upstream)",
|
||||
aggregate_function,
|
||||
backward_state,
|
||||
upstream_state,
|
||||
)
|
||||
failed += 1
|
||||
else:
|
||||
logging.info("OK %s", aggregate_function)
|
||||
passed += 1
|
||||
|
@ -46,7 +46,7 @@ def test_cgroup_cpu_limit():
|
||||
"clickhouse local -q \"select value from system.settings where name='max_threads'\"",
|
||||
num_cpus,
|
||||
)
|
||||
expect_output = (r"auto({})".format(math.ceil(num_cpus))).encode()
|
||||
expect_output = (r"\'auto({})\'".format(math.ceil(num_cpus))).encode()
|
||||
assert (
|
||||
result.strip() == expect_output
|
||||
), f"fail for cpu limit={num_cpus}, result={result.strip()}, expect={expect_output}"
|
||||
|
@ -1,11 +0,0 @@
|
||||
<clickhouse>
|
||||
<remote_servers>
|
||||
<test_cluster>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>node</host>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
@ -1,9 +0,0 @@
|
||||
<clickhouse>
|
||||
<logger>
|
||||
<level>information</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
</logger>
|
||||
</clickhouse>
|
@ -1,54 +0,0 @@
|
||||
import logging
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def cluster():
|
||||
try:
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
cluster.add_instance(
|
||||
"node",
|
||||
main_configs=[
|
||||
"configs/config.d/cluster.xml",
|
||||
],
|
||||
)
|
||||
logging.info("Starting cluster...")
|
||||
cluster.start()
|
||||
logging.info("Cluster started")
|
||||
|
||||
node = cluster.instances["node"]
|
||||
node.query(
|
||||
"""
|
||||
CREATE TABLE tab
|
||||
(
|
||||
a DateTime,
|
||||
pk String
|
||||
) Engine = MergeTree() ORDER BY pk;
|
||||
"""
|
||||
)
|
||||
|
||||
yield cluster
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test_incorrect_datetime_format(cluster):
|
||||
"""
|
||||
Test for an MSan issue which is caused by parsing incorrect datetime string
|
||||
"""
|
||||
|
||||
node = cluster.instances["node"]
|
||||
|
||||
res = node.query("SELECT count(*) FROM tab WHERE a = '2024-08-06 09:58:09'").strip()
|
||||
assert res == "0"
|
||||
|
||||
error = node.query_and_get_error(
|
||||
"SELECT count(*) FROM tab WHERE a = '2024-08-06 09:58:0'"
|
||||
).strip()
|
||||
assert "Cannot parse time component of DateTime 09:58:0" in error
|
||||
|
||||
error = node.query_and_get_error(
|
||||
"SELECT count(*) FROM tab WHERE a = '2024-08-0 09:58:09'"
|
||||
).strip()
|
||||
assert "Cannot convert string '2024-08-0 09:58:09' to type DateTime" in error
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
|
||||
<clickhouse>
|
||||
<logger>
|
||||
<level>test</level>
|
||||
</logger>
|
||||
|
||||
</clickhouse>
|
@ -3,6 +3,7 @@
|
||||
import pytest
|
||||
import time
|
||||
import threading
|
||||
import uuid
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from multiprocessing.dummy import Pool
|
||||
from helpers.network import PartitionManager
|
||||
@ -10,8 +11,12 @@ from helpers.client import QueryRuntimeException
|
||||
from helpers.test_tools import assert_eq_with_retry
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
node1 = cluster.add_instance("node1", with_zookeeper=True)
|
||||
node1 = cluster.add_instance(
|
||||
"node1",
|
||||
main_configs=["configs/storage_conf.xml"],
|
||||
with_zookeeper=True,
|
||||
with_minio=True,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
@ -25,10 +30,16 @@ def started_cluster():
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test_replica_inserts_with_keeper_restart(started_cluster):
|
||||
@pytest.mark.parametrize(
|
||||
"engine,storage_policy",
|
||||
[
|
||||
("ReplicatedMergeTree", "default"),
|
||||
],
|
||||
)
|
||||
def test_replica_inserts_with_keeper_restart(started_cluster, engine, storage_policy):
|
||||
try:
|
||||
node1.query(
|
||||
"CREATE TABLE r (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/r', '0') ORDER BY tuple()"
|
||||
f"CREATE TABLE r (a UInt64, b String) ENGINE={engine}('/test/r', '0') ORDER BY tuple() SETTINGS storage_policy='{storage_policy}'"
|
||||
)
|
||||
|
||||
p = Pool(1)
|
||||
@ -60,10 +71,18 @@ def test_replica_inserts_with_keeper_restart(started_cluster):
|
||||
node1.query("DROP TABLE IF EXISTS r SYNC")
|
||||
|
||||
|
||||
def test_replica_inserts_with_keeper_disconnect(started_cluster):
|
||||
@pytest.mark.parametrize(
|
||||
"engine,storage_policy",
|
||||
[
|
||||
("ReplicatedMergeTree", "default"),
|
||||
],
|
||||
)
|
||||
def test_replica_inserts_with_keeper_disconnect(
|
||||
started_cluster, engine, storage_policy
|
||||
):
|
||||
try:
|
||||
node1.query(
|
||||
"CREATE TABLE r (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/r', '0') ORDER BY tuple()"
|
||||
f"CREATE TABLE r2 (a UInt64, b String) ENGINE={engine}('/test/r2', '0') ORDER BY tuple() SETTINGS storage_policy='{storage_policy}'"
|
||||
)
|
||||
|
||||
p = Pool(1)
|
||||
@ -84,26 +103,32 @@ def test_replica_inserts_with_keeper_disconnect(started_cluster):
|
||||
disconnect_event.wait(90)
|
||||
|
||||
node1.query(
|
||||
"INSERT INTO r SELECT number, toString(number) FROM numbers(10) SETTINGS insert_keeper_max_retries=20"
|
||||
"INSERT INTO r2 SELECT number, toString(number) FROM numbers(10) SETTINGS insert_keeper_max_retries=20"
|
||||
)
|
||||
node1.query(
|
||||
"INSERT INTO r SELECT number, toString(number) FROM numbers(10, 10) SETTINGS insert_keeper_max_retries=20"
|
||||
"INSERT INTO r2 SELECT number, toString(number) FROM numbers(10, 10) SETTINGS insert_keeper_max_retries=20"
|
||||
)
|
||||
|
||||
job.wait()
|
||||
p.close()
|
||||
p.join()
|
||||
|
||||
assert node1.query("SELECT COUNT() FROM r") == "20\n"
|
||||
assert node1.query("SELECT COUNT() FROM r2") == "20\n"
|
||||
|
||||
finally:
|
||||
node1.query("DROP TABLE IF EXISTS r SYNC")
|
||||
node1.query("DROP TABLE IF EXISTS r2 SYNC")
|
||||
|
||||
|
||||
def test_query_timeout_with_zk_down(started_cluster):
|
||||
@pytest.mark.parametrize(
|
||||
"engine,storage_policy",
|
||||
[
|
||||
("ReplicatedMergeTree", "default"),
|
||||
],
|
||||
)
|
||||
def test_query_timeout_with_zk_down(started_cluster, engine, storage_policy):
|
||||
try:
|
||||
node1.query(
|
||||
"CREATE TABLE zk_down (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/zk_down', '0') ORDER BY tuple()"
|
||||
f"CREATE TABLE zk_down (a UInt64, b String) ENGINE={engine}('/test/zk_down', '0') ORDER BY tuple() SETTINGS storage_policy='{storage_policy}'"
|
||||
)
|
||||
|
||||
cluster.stop_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
|
||||
@ -118,3 +143,45 @@ def test_query_timeout_with_zk_down(started_cluster):
|
||||
finally:
|
||||
cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
|
||||
node1.query("DROP TABLE IF EXISTS zk_down SYNC")
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"engine,storage_policy",
|
||||
[
|
||||
("ReplicatedMergeTree", "default"),
|
||||
],
|
||||
)
|
||||
def test_retries_should_not_wait_for_global_connection(
|
||||
started_cluster, engine, storage_policy
|
||||
):
|
||||
pm = PartitionManager()
|
||||
try:
|
||||
node1.query(
|
||||
f"CREATE TABLE zk_down_retries (a UInt64, b String) ENGINE={engine}('/test/zk_down', '0') ORDER BY tuple() SETTINGS storage_policy='{storage_policy}'"
|
||||
)
|
||||
|
||||
cluster.stop_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
|
||||
# Apart from stopping keepers, we introduce a network delay to make connection retries slower
|
||||
# We want to check that retries are not blocked during that time
|
||||
pm.add_network_delay(node1, 1000)
|
||||
|
||||
query_id = uuid.uuid4()
|
||||
|
||||
with pytest.raises(QueryRuntimeException):
|
||||
node1.query(
|
||||
"INSERT INTO zk_down_retries SELECT number, toString(number) FROM numbers(10) SETTINGS insert_keeper_max_retries=10, insert_keeper_retry_max_backoff_ms=100",
|
||||
query_id=str(query_id),
|
||||
)
|
||||
pm.heal_all()
|
||||
# Use query_log for execution time since we want to ignore the network delay introduced (also in client)
|
||||
node1.query("SYSTEM FLUSH LOGS")
|
||||
res = node1.query(
|
||||
f"SELECT query_duration_ms FROM system.query_log WHERE type != 'QueryStart' AND query_id = '{query_id}'"
|
||||
)
|
||||
query_duration = int(res)
|
||||
# It should be around 1 second. 5 seconds is being generous (debug and so on). Used to take 35 seconds without the fix
|
||||
assert query_duration < 5000
|
||||
finally:
|
||||
pm.heal_all()
|
||||
cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"])
|
||||
node1.query("DROP TABLE IF EXISTS zk_down_retries SYNC")
|
||||
|
@ -0,0 +1,10 @@
|
||||
<clickhouse>
|
||||
<database_atomic_delay_before_drop_table_sec>10</database_atomic_delay_before_drop_table_sec>
|
||||
<allow_moving_table_directory_to_trash>1</allow_moving_table_directory_to_trash>
|
||||
<merge_tree>
|
||||
<initialization_retry_period>10</initialization_retry_period>
|
||||
</merge_tree>
|
||||
<max_database_replicated_create_table_thread_pool_size>50</max_database_replicated_create_table_thread_pool_size>
|
||||
<allow_experimental_transactions>42</allow_experimental_transactions>
|
||||
<async_load_databases>false</async_load_databases>
|
||||
</clickhouse>
|
@ -0,0 +1,11 @@
|
||||
<clickhouse>
|
||||
<profiles>
|
||||
<default>
|
||||
</default>
|
||||
</profiles>
|
||||
<users>
|
||||
<default>
|
||||
<profile>default</profile>
|
||||
</default>
|
||||
</users>
|
||||
</clickhouse>
|
@ -0,0 +1,70 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import assert_eq_with_retry
|
||||
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
shard1_node = cluster.add_instance(
|
||||
"shard1_node",
|
||||
main_configs=["configs/config.xml"],
|
||||
user_configs=["configs/settings.xml"],
|
||||
with_zookeeper=True,
|
||||
stay_alive=True,
|
||||
macros={"shard": 1, "replica": 1},
|
||||
)
|
||||
|
||||
shard2_node = cluster.add_instance(
|
||||
"shard2_node",
|
||||
main_configs=["configs/config.xml"],
|
||||
user_configs=["configs/settings.xml"],
|
||||
with_zookeeper=True,
|
||||
stay_alive=True,
|
||||
macros={"shard": 2, "replica": 1},
|
||||
)
|
||||
|
||||
|
||||
all_nodes = [
|
||||
shard1_node,
|
||||
shard2_node,
|
||||
]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test_alter_modify_order_by(started_cluster):
|
||||
shard1_node.query("DROP DATABASE IF EXISTS alter_modify_order_by SYNC;")
|
||||
shard2_node.query("DROP DATABASE IF EXISTS alter_modify_order_by SYNC;")
|
||||
|
||||
shard1_node.query(
|
||||
"CREATE DATABASE alter_modify_order_by ENGINE = Replicated('/test/database/alter_modify_order_by', '{shard}', '{replica}');"
|
||||
)
|
||||
shard1_node.query(
|
||||
"CREATE TABLE alter_modify_order_by.t1 (id Int64, score Int64) ENGINE = ReplicatedMergeTree('/test/tables/{uuid}/{shard}', '{replica}') ORDER BY (id);"
|
||||
)
|
||||
shard1_node.query("ALTER TABLE alter_modify_order_by.t1 modify order by (id);")
|
||||
shard2_node.query(
|
||||
"CREATE DATABASE alter_modify_order_by ENGINE = Replicated('/test/database/alter_modify_order_by', '{shard}', '{replica}');"
|
||||
)
|
||||
|
||||
query = (
|
||||
"select count() from system.tables where database = 'alter_modify_order_by';"
|
||||
)
|
||||
expected = shard1_node.query(query)
|
||||
assert_eq_with_retry(shard2_node, query, expected)
|
||||
|
||||
query = "show create table alter_modify_order_by.t1;"
|
||||
assert shard1_node.query(query) == shard2_node.query(query)
|
||||
|
||||
shard1_node.query("DROP DATABASE IF EXISTS alter_modify_order_by SYNC;")
|
||||
shard2_node.query("DROP DATABASE IF EXISTS alter_modify_order_by SYNC;")
|
@ -69,6 +69,124 @@ def update_workloads_config(**settings):
|
||||
node.query("system reload config")
|
||||
|
||||
|
||||
def check_profile_event_for_query(workload, profile_event, amount=1):
|
||||
node.query("system flush logs")
|
||||
query_pattern = f"workload='{workload}'".replace("'", "\\'")
|
||||
assert (
|
||||
int(
|
||||
node.query(
|
||||
f"select ProfileEvents['{profile_event}'] from system.query_log where query ilike '%{query_pattern}%' and type = 'QueryFinish' order by query_start_time_microseconds desc limit 1"
|
||||
)
|
||||
)
|
||||
== amount
|
||||
)
|
||||
|
||||
|
||||
def test_s3_resource_request_granularity():
|
||||
node.query(
|
||||
f"""
|
||||
drop table if exists data;
|
||||
create table data (key UInt64 CODEC(NONE), value String CODEC(NONE)) engine=MergeTree() order by key settings min_bytes_for_wide_part=1e9, storage_policy='s3';
|
||||
"""
|
||||
)
|
||||
|
||||
total_bytes = 50000000 # Approximate data size
|
||||
max_bytes_per_request = 2000000 # Should be ~1MB or less in general
|
||||
min_bytes_per_request = 6000 # Small requests are ok, but we don't want hurt performance with too often resource requests
|
||||
|
||||
writes_before = int(
|
||||
node.query(
|
||||
f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
write_bytes_before = int(
|
||||
node.query(
|
||||
f"select dequeued_cost from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
write_budget_before = int(
|
||||
node.query(
|
||||
f"select budget from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
node.query(
|
||||
f"insert into data select number, randomString(10000000) from numbers(5) SETTINGS workload='admin'"
|
||||
)
|
||||
writes_after = int(
|
||||
node.query(
|
||||
f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
write_bytes_after = int(
|
||||
node.query(
|
||||
f"select dequeued_cost from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
write_budget_after = int(
|
||||
node.query(
|
||||
f"select budget from system.scheduler where resource='network_write' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
|
||||
write_requests = writes_after - writes_before
|
||||
write_bytes = (write_bytes_after - write_bytes_before) - (
|
||||
write_budget_after - write_budget_before
|
||||
)
|
||||
assert write_bytes > 1.0 * total_bytes
|
||||
assert write_bytes < 1.05 * total_bytes
|
||||
assert write_bytes / write_requests < max_bytes_per_request
|
||||
assert write_bytes / write_requests > min_bytes_per_request
|
||||
check_profile_event_for_query("admin", "SchedulerIOWriteRequests", write_requests)
|
||||
check_profile_event_for_query("admin", "SchedulerIOWriteBytes", write_bytes)
|
||||
|
||||
node.query(f"optimize table data final")
|
||||
|
||||
reads_before = int(
|
||||
node.query(
|
||||
f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
read_bytes_before = int(
|
||||
node.query(
|
||||
f"select dequeued_cost from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
read_budget_before = int(
|
||||
node.query(
|
||||
f"select budget from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
node.query(
|
||||
f"select count() from data where not ignore(*) SETTINGS workload='admin'"
|
||||
)
|
||||
reads_after = int(
|
||||
node.query(
|
||||
f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
read_bytes_after = int(
|
||||
node.query(
|
||||
f"select dequeued_cost from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
read_budget_after = int(
|
||||
node.query(
|
||||
f"select budget from system.scheduler where resource='network_read' and path='/prio/admin'"
|
||||
).strip()
|
||||
)
|
||||
|
||||
read_bytes = (read_bytes_after - read_bytes_before) - (
|
||||
read_budget_after - read_budget_before
|
||||
)
|
||||
read_requests = reads_after - reads_before
|
||||
assert read_bytes > 1.0 * total_bytes
|
||||
assert read_bytes < 1.05 * total_bytes
|
||||
assert read_bytes / read_requests < max_bytes_per_request
|
||||
assert read_bytes / read_requests > min_bytes_per_request
|
||||
check_profile_event_for_query("admin", "SchedulerIOReadRequests", read_requests)
|
||||
check_profile_event_for_query("admin", "SchedulerIOReadBytes", read_bytes)
|
||||
|
||||
|
||||
def test_s3_disk():
|
||||
node.query(
|
||||
f"""
|
||||
|
@@ -0,0 +1,3 @@
<clickhouse>
    <cgroups_memory_usage_observer_wait_time>0</cgroups_memory_usage_observer_wait_time>
</clickhouse>
@@ -152,7 +152,7 @@ cat /proc/sys/kernel/core_pattern
{
    time $SCRIPT_DIR/download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \
    time stage=configure "$script_path"/compare.sh ; \
} 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log
} 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee -a compare.log

# Stop the servers to free memory. Normally they are restarted before getting
# the profile info, so they shouldn't use much, but if the comparison script
@ -0,0 +1,70 @@
|
||||
-------------- Test copy sorting clauses from source table --------------
|
||||
CREATE TABLE default.x
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SAMPLE BY intHash32(UserID)
|
||||
SETTINGS index_granularity = 8192
|
||||
-------------------------------------------------------------------------
|
||||
CREATE TABLE default.x_as
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SAMPLE BY intHash32(UserID)
|
||||
SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192
|
||||
-------------- Test copy sorting clauses from destination table (source table without the same type clauses) --------------
|
||||
CREATE TABLE default.x
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PRIMARY KEY (CounterID, EventDate, intHash32(UserID))
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SETTINGS index_granularity = 8192
|
||||
-------------------------------------------------------------------------
|
||||
CREATE TABLE default.x_as
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
PRIMARY KEY (CounterID, EventDate, intHash32(UserID))
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SAMPLE BY intHash32(UserID)
|
||||
SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192
|
||||
-------------- Test copy sorting clauses from destination table (source table with the same type clauses) --------------
|
||||
CREATE TABLE default.x
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY CounterID
|
||||
SETTINGS index_granularity = 8192
|
||||
-------------------------------------------------------------------------
|
||||
CREATE TABLE default.x_as
|
||||
(
|
||||
`CounterID` UInt32,
|
||||
`EventDate` Date,
|
||||
`UserID` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SAMPLE BY intHash32(UserID)
|
||||
SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192
|
@ -0,0 +1,37 @@
|
||||
DROP TABLE IF EXISTS x;
|
||||
DROP TABLE IF EXISTS x_as;
|
||||
|
||||
SELECT '-------------- Test copy sorting clauses from source table --------------';
|
||||
CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID);
|
||||
CREATE TABLE x_as AS x ENGINE = MergeTree SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1;
|
||||
|
||||
SHOW CREATE TABLE x FORMAT TSVRaw;
|
||||
SELECT '-------------------------------------------------------------------------';
|
||||
SHOW CREATE TABLE x_as FORMAT TSVRaw;
|
||||
|
||||
DROP TABLE x;
|
||||
DROP TABLE x_as;
|
||||
|
||||
SELECT '-------------- Test copy sorting clauses from destination table (source table without the same type clauses) --------------';
|
||||
CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PRIMARY KEY (CounterID, EventDate, intHash32(UserID));
|
||||
CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1;
|
||||
|
||||
SHOW CREATE TABLE x FORMAT TSVRaw;
|
||||
SELECT '-------------------------------------------------------------------------';
|
||||
SHOW CREATE TABLE x_as FORMAT TSVRaw;
|
||||
|
||||
DROP TABLE x;
|
||||
DROP TABLE x_as;
|
||||
|
||||
SELECT '-------------- Test copy sorting clauses from destination table (source table with the same type clauses) --------------';
|
||||
CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree ORDER BY (CounterID);
|
||||
CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1;
|
||||
|
||||
SHOW CREATE TABLE x FORMAT TSVRaw;
|
||||
SELECT '-------------------------------------------------------------------------';
|
||||
SHOW CREATE TABLE x_as FORMAT TSVRaw;
|
||||
|
||||
DROP TABLE x;
|
||||
DROP TABLE x_as;
|
||||
|
||||
|
@ -21,7 +21,7 @@ Expression (Projection)
|
||||
Union
|
||||
Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))))
|
||||
ReadFromSystemNumbers
|
||||
Expression
|
||||
Expression (Before LIMIT BY)
|
||||
ReadFromRemote (Read from remote replica)
|
||||
explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized
|
||||
Union
|
||||
@ -96,7 +96,7 @@ Expression (Project names)
|
||||
LimitBy
|
||||
Expression ((Before LIMIT BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + (Change column names to column identifiers + (Project names + (Projection + Change column names to column identifiers)))))))))))
|
||||
ReadFromSystemNumbers
|
||||
Expression
|
||||
Expression (Before LIMIT BY)
|
||||
ReadFromRemote (Read from remote replica)
|
||||
explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized
|
||||
Union
|
||||
|
@ -0,0 +1,4 @@
|
||||
200
|
||||
200
|
||||
100
|
||||
100
|
@ -0,0 +1,23 @@
|
||||
DROP TABLE IF EXISTS t_merge_tree SYNC;
|
||||
DROP TABLE IF EXISTS t_replicated_merge_tree SYNC;
|
||||
|
||||
CREATE TABLE t_merge_tree(time Date, id String , name String) ENGINE = MergeTree() PARTITION BY time ORDER BY id;
|
||||
CREATE TABLE t_replicated_merge_tree(time Date, id String, name String) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_rep','1') PARTITION BY time ORDER BY id;
|
||||
|
||||
INSERT INTO t_merge_tree select '2024-08-01', '1', toString(number) FROM numbers(100);
|
||||
INSERT INTO t_merge_tree select '2024-08-02', '1', toString(number) FROM numbers(100);
|
||||
|
||||
INSERT INTO t_replicated_merge_tree select '2024-08-01', '1', toString(number) FROM numbers(100);
|
||||
INSERT INTO t_replicated_merge_tree select '2024-08-02', '1', toString(number) FROM numbers(100);
|
||||
|
||||
SELECT COUNT() FROM t_merge_tree;
|
||||
SELECT COUNT() FROM t_replicated_merge_tree;
|
||||
|
||||
DELETE FROM t_merge_tree IN PARTITION '2024-08-01' WHERE id = '1';
|
||||
DELETE FROM t_replicated_merge_tree IN PARTITION '2024-08-01' WHERE id = '1';
|
||||
|
||||
SELECT COUNT() FROM t_merge_tree;
|
||||
SELECT COUNT() FROM t_replicated_merge_tree;
|
||||
|
||||
DROP TABLE t_merge_tree SYNC;
|
||||
DROP TABLE t_replicated_merge_tree SYNC;
|
@ -48,9 +48,11 @@ def generate_data(q, total_number, use_token):
|
||||
partitions = ["2022-11-11 10:10:10", "2022-12-12 10:10:10"]
|
||||
last_number = 0
|
||||
while True:
|
||||
dup_simulate = random.randint(0, 3)
|
||||
# 0 to simulate duplication
|
||||
# 1 to simulate empty
|
||||
simulate_flag = random.randint(0, 4)
|
||||
# insert old data randomly. 25% of them are dup.
|
||||
if dup_simulate == 0:
|
||||
if simulate_flag == 0:
|
||||
last_idx = len(old_data) - 1
|
||||
if last_idx < 0:
|
||||
continue
|
||||
@ -58,6 +60,11 @@ def generate_data(q, total_number, use_token):
|
||||
if idx < 0:
|
||||
idx = 0
|
||||
q.put(old_data[idx])
|
||||
if simulate_flag == 1:
|
||||
empty_insert_stmt = (
|
||||
"insert into t_async_insert_dedup values format JSONEachRow"
|
||||
)
|
||||
q.put((empty_insert_stmt, ""))
|
||||
else:
|
||||
# insert new data.
|
||||
chunk_size = random.randint(1, max_chunk_size)
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: long, zookeeper, no-parallel, no-fasttest
|
||||
# Tags: long, zookeeper, no-fasttest
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: long, zookeeper, no-parallel, no-fasttest
|
||||
# Tags: long, zookeeper, no-fasttest
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
|
@ -395,9 +395,9 @@ Expression ((Projection + Before ORDER BY))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Float64 9007199254740994
|
||||
@ -427,9 +427,9 @@ Expression ((Projection + Before ORDER BY))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Nullable(Float64) 9007199254740994
|
||||
@ -459,9 +459,9 @@ Expression ((Projection + Before ORDER BY))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
Expression ((Before ORDER BY + (Conversion before UNION + (Projection + Before ORDER BY))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Float64 9007199254740994
|
||||
|
@ -394,9 +394,9 @@ Expression ((Project names + Projection))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Float64 9007199254740994
|
||||
@ -426,9 +426,9 @@ Expression ((Project names + Projection))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Nullable(Float64) 9007199254740994
|
||||
@ -458,9 +458,9 @@ Expression ((Project names + Projection))
|
||||
Union
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
Expression (( + ( + ( + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Conversion before UNION + (Project names + (Projection + Change column names to column identifiers)))))))
|
||||
ReadFromStorage (SystemOne)
|
||||
-- execute
|
||||
Float64 9007199254740994
|
||||
|
@ -53,7 +53,7 @@ Expression (Projection)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression (Before ORDER BY)
|
||||
ReadFromSystemNumbers
|
||||
Expression (( + Projection))
|
||||
Expression ((Before ORDER BY + Projection))
|
||||
Distinct
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression (Before ORDER BY)
|
||||
@ -536,7 +536,7 @@ Expression (Projection)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression (Before ORDER BY)
|
||||
ReadFromSystemNumbers
|
||||
Expression (( + Projection))
|
||||
Expression ((Before ORDER BY + Projection))
|
||||
Distinct
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression (Before ORDER BY)
|
||||
|
@ -54,7 +54,7 @@ Expression (Project names)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression ((Projection + Change column names to column identifiers))
|
||||
ReadFromSystemNumbers
|
||||
Expression (( + ( + Project names)))
|
||||
Expression ((Projection + (Change column names to column identifiers + Project names)))
|
||||
Distinct (DISTINCT)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression ((Projection + Change column names to column identifiers))
|
||||
@ -542,7 +542,7 @@ Expression (Project names)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression ((Projection + Change column names to column identifiers))
|
||||
ReadFromSystemNumbers
|
||||
Expression (( + ( + Project names)))
|
||||
Expression ((Projection + (Change column names to column identifiers + Project names)))
|
||||
Distinct (DISTINCT)
|
||||
Distinct (Preliminary DISTINCT)
|
||||
Expression ((Projection + Change column names to column identifiers))
|
||||
|
@@ -1,2 +1,2 @@
explain ast insert into test values balabala;
explain ast insert into test values (balabala);
explain ast insert into test format TabSeparated balabala;
@@ -1,4 +1,4 @@
-- Tags: no-fasttest, no-parallel
-- Tags: long, no-fasttest, no-parallel, no-tsan, no-msan, no-asan

set output_format_parquet_use_custom_encoder = 1;
set output_format_parquet_row_group_size = 1000;
@ -8,3 +8,7 @@ Syntax error
|
||||
7
|
||||
8
|
||||
9
|
||||
InsertQuery (children 1)
|
||||
Identifier TEST2
|
||||
InsertQuery (children 1)
|
||||
Identifier TEST1
|
||||
|
@ -51,6 +51,13 @@ INSERT INTO TEST2 VALUES
|
||||
SELECT * FROM TEST1 ORDER BY value;
|
||||
SELECT * FROM TEST2 ORDER BY value;
|
||||
DROP TABLE TEST1; DROP TABLE TEST2;
|
||||
|
||||
EXPLAIN AST INSERT INTO TEST2 FORMAT CSV
|
||||
1
|
||||
2
|
||||
|
||||
EXPLAIN AST INSERT INTO TEST1 VALUES (101),(102);
|
||||
|
||||
EOF
|
||||
|
||||
$CLICKHOUSE_CLIENT -m < "$SQL_FILE_NAME"
|
||||
|
@ -0,0 +1,23 @@
|
||||
-----
|
||||
1 1 1 a 1 A 1 A
|
||||
2 2 2 b 2 B 2 B
|
||||
-----
|
||||
\N \N \N 0 3 B
|
||||
1 1 1 a 1 A 1 A
|
||||
2 2 2 b 2 B 2 B
|
||||
-----
|
||||
1 1 1 a 1 A 1 A
|
||||
2 2 2 b 2 B 2 B
|
||||
\N \N \N \N 3 B \N \N
|
||||
\N \N \N \N \N \N 3 B
|
||||
-----
|
||||
\N \N \N 3 3 B 0 0
|
||||
\N \N \N 0 0 3 3 B
|
||||
1 1 1 a 1 1 A 1 1 A
|
||||
2 2 2 b 2 2 B 2 2 B
|
||||
-----
|
||||
3 3 \N B B
|
||||
1 1 1 a A A
|
||||
2 2 2 b B B
|
||||
-----
|
||||
7
|