Merge branch 'ClickHouse:master' into hive_partitioning_filtration

This commit is contained in:
Yarik Briukhovetskyi 2024-09-03 15:35:31 +02:00 committed by GitHub
commit 1a386ae4d5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
281 changed files with 5472 additions and 1548 deletions

View File

@ -30,7 +30,6 @@
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table. e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Added a new server setting, `disable_insertion_and_mutation`. If it is enabled, the server will deny all insertions and mutations. This includes asynchronous INSERTs. This setting can be used to create read-only replicas. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).

View File

@ -56,6 +56,15 @@ Other upcoming meetups
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

View File

@ -18,7 +18,9 @@
#define Net_HTTPResponse_INCLUDED
#include <map>
#include <vector>
#include "Poco/Net/HTTPCookie.h"
#include "Poco/Net/HTTPMessage.h"
#include "Poco/Net/Net.h"
@ -180,6 +182,8 @@ namespace Net
/// May throw an exception in case of a malformed
/// Set-Cookie header.
void getHeaders(std::map<std::string, std::string> & headers) const;
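/// Fills the given map with the header name/value pairs of the response.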
void write(std::ostream & ostr) const;
/// Writes the HTTP response to the given
/// output stream.

View File

@ -19,6 +19,8 @@
#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@ -33,6 +35,27 @@ namespace Net
{
class IHTTPSessionDataHooks
/// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession
/// It allows monitoring, throttling and scheduling data streams with syscall granularity
{
public:
virtual ~IHTTPSessionDataHooks() = default;
virtual void atStart(int bytes) = 0;
/// Called before sending/receiving data `bytes` to/from socket.
virtual void atFinish(int bytes) = 0;
/// Called when sending/receiving of data `bytes` is successfully finished.
virtual void atFail() = 0;
/// If an error occurs during send/receive, `atFail()` is called instead of `atFinish()`.
};
using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;
class Net_API HTTPSession
/// HTTPSession implements basic HTTP session management
/// for both HTTP clients and HTTP servers.
@ -73,6 +96,12 @@ namespace Net
Poco::Timespan getReceiveTimeout() const;
/// Returns receive timeout for the HTTP session.
void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
/// Sets data hooks that will be called on every send to the socket.
void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
/// Sets data hooks that will be called on every receive from the socket.
bool connected() const;
/// Returns true if the underlying socket is connected.
@ -211,6 +240,10 @@ namespace Net
Poco::Exception * _pException;
Poco::Any _data;
// Data hooks
HTTPSessionDataHooksPtr _sendDataHooks;
HTTPSessionDataHooksPtr _receiveDataHooks;
friend class HTTPStreamBuf;
friend class HTTPHeaderStreamBuf;
friend class HTTPFixedLengthStreamBuf;
@ -246,6 +279,16 @@ namespace Net
return _receiveTimeout;
}
inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
{
_sendDataHooks = sendDataHooks;
}
inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
{
_receiveDataHooks = receiveDataHooks;
}
inline StreamSocket & HTTPSession::socket()
{
return _socket;

View File

@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
}
}
void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
{
headers.clear();
for (const auto & it : *this)
{
headers.emplace(it.first, it.second);
}
}
void HTTPResponse::write(std::ostream& ostr) const
{

View File

@ -128,14 +128,14 @@ int HTTPSession::get()
{
if (_pCurrent == _pEnd)
refill();
if (_pCurrent < _pEnd)
return *_pCurrent++;
else
return std::char_traits<char>::eof();
}
int HTTPSession::peek()
{
if (_pCurrent == _pEnd)
@ -147,7 +147,7 @@ int HTTPSession::peek()
return std::char_traits<char>::eof();
}
int HTTPSession::read(char* buffer, std::streamsize length)
{
if (_pCurrent < _pEnd)
@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
try
{
return _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atStart((int) length);
int result = _socket.sendBytes(buffer, (int) length);
if (_sendDataHooks)
_sendDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_sendDataHooks)
_sendDataHooks->atFail();
setException(exc);
throw;
}
@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
try
{
return _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atStart(length);
int result = _socket.receiveBytes(buffer, length);
if (_receiveDataHooks)
_receiveDataHooks->atFinish(result);
return result;
}
catch (Poco::Exception& exc)
{
if (_receiveDataHooks)
_receiveDataHooks->atFail();
setException(exc);
throw;
}

View File

@ -63,7 +63,7 @@ bool checkIsBrokenTimeout()
SocketImpl::SocketImpl():
_sockfd(POCO_INVALID_SOCKET),
_blocking(true),
_blocking(true),
_isBrokenTimeout(checkIsBrokenTimeout())
{
}
@ -82,7 +82,7 @@ SocketImpl::~SocketImpl()
close();
}
SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -118,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
rc = ::connect(_sockfd, address.addr(), address.length());
}
while (rc != 0 && lastError() == POCO_EINTR);
if (rc != 0)
if (rc != 0)
{
int err = lastError();
error(err, address.toString());
@ -205,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#if defined(POCO_HAVE_IPv6)
if (address.family() != SocketAddress::IPv6)
throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");
if (_sockfd == POCO_INVALID_SOCKET)
{
init(address.af());
@ -226,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#endif
}
void SocketImpl::listen(int backlog)
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
int rc = ::listen(_sockfd, backlog);
if (rc != 0) error();
}
@ -254,7 +254,7 @@ void SocketImpl::shutdownReceive()
if (rc != 0) error();
}
void SocketImpl::shutdownSend()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -263,7 +263,7 @@ void SocketImpl::shutdownSend()
if (rc != 0) error();
}
void SocketImpl::shutdown()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -318,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
throw TimeoutException();
}
}
int rc;
do
{
@ -326,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
}
while (blocking && rc < 0 && lastError() == POCO_EINTR);
if (rc < 0)
if (rc < 0)
{
int err = lastError();
if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
@ -364,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
throw TimeoutException();
}
}
sockaddr_storage abuffer;
struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
poco_socklen_t saLen = sizeof(abuffer);
@ -451,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
}
while (rc < 0 && lastError() == POCO_EINTR);
if (rc < 0) error();
return rc > 0;
return rc > 0;
#else
@ -494,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
}
while (rc < 0 && errorCode == POCO_EINTR);
if (rc < 0) error(errorCode);
return rc > 0;
return rc > 0;
#endif // POCO_HAVE_FD_POLL
}
@ -504,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
Poco::Timespan remainingTime(timeout);
return pollImpl(remainingTime, mode);
}
void SocketImpl::setSendBufferSize(int size)
{
setOption(SOL_SOCKET, SO_SNDBUF, size);
}
int SocketImpl::getSendBufferSize()
{
int result;
@ -524,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
setOption(SOL_SOCKET, SO_RCVBUF, size);
}
int SocketImpl::getReceiveBufferSize()
{
int result;
@ -570,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
return result;
}
SocketAddress SocketImpl::address()
{
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -581,7 +581,7 @@ SocketAddress SocketImpl::address()
int rc = ::getsockname(_sockfd, pSA, &saLen);
if (rc == 0)
return SocketAddress(pSA, saLen);
else
else
error();
return SocketAddress();
}

2
contrib/libfiu vendored

@ -1 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3

2
contrib/usearch vendored

@ -1 +1 @@
Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4
Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37

View File

@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off

View File

@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry

View File

@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)
#### Improvement
* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL CATEGORY
* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -0,0 +1,50 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.3.59-lts (e729b9fa40e) FIXME as compared to v24.8.2.3-lts (b54f79ed323)
#### New Feature
* Backported in [#68710](https://github.com/ClickHouse/ClickHouse/issues/68710): Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE` which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).
#### Improvement
* Backported in [#69097](https://github.com/ClickHouse/ClickHouse/issues/69097): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68973](https://github.com/ClickHouse/ClickHouse/issues/68973): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68818](https://github.com/ClickHouse/ClickHouse/issues/68818): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68893](https://github.com/ClickHouse/ClickHouse/issues/68893): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68721](https://github.com/ClickHouse/ClickHouse/issues/68721): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69029](https://github.com/ClickHouse/ClickHouse/issues/69029): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68864](https://github.com/ClickHouse/ClickHouse/issues/68864): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68854](https://github.com/ClickHouse/ClickHouse/issues/68854): Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68790](https://github.com/ClickHouse/ClickHouse/issues/68790): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69108](https://github.com/ClickHouse/ClickHouse/issues/69108): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68850](https://github.com/ClickHouse/ClickHouse/issues/68850): Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68911](https://github.com/ClickHouse/ClickHouse/issues/68911): Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#69160](https://github.com/ClickHouse/ClickHouse/issues/69160): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69072](https://github.com/ClickHouse/ClickHouse/issues/69072): Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#69016](https://github.com/ClickHouse/ClickHouse/issues/69016): Don't use serializations cache in const Dynamic column methods. It could lead to use of an uninitialized value or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69120](https://github.com/ClickHouse/ClickHouse/issues/69120): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
#### NO CL CATEGORY
* Backported in [#68947](https://github.com/ClickHouse/ClickHouse/issues/68947):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#68704](https://github.com/ClickHouse/ClickHouse/issues/68704): Fix enumerating dynamic subcolumns. [#68582](https://github.com/ClickHouse/ClickHouse/pull/68582) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69000](https://github.com/ClickHouse/ClickHouse/issues/69000): Prioritizing of virtual columns in hive partitioning. [#68606](https://github.com/ClickHouse/ClickHouse/pull/68606) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#68799](https://github.com/ClickHouse/ClickHouse/issues/68799): CI: Disable SQLLogic job. [#68654](https://github.com/ClickHouse/ClickHouse/pull/68654) ([Max K.](https://github.com/maxknv)).
* Backported in [#68834](https://github.com/ClickHouse/ClickHouse/issues/68834): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68781](https://github.com/ClickHouse/ClickHouse/issues/68781): Fix flaky test 00989_parallel_parts_loading. [#68737](https://github.com/ClickHouse/ClickHouse/pull/68737) ([alesapin](https://github.com/alesapin)).
* Backported in [#68762](https://github.com/ClickHouse/ClickHouse/issues/68762): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#68810](https://github.com/ClickHouse/ClickHouse/issues/68810): Try to disable rerun check if job triggered manually. [#68751](https://github.com/ClickHouse/ClickHouse/pull/68751) ([Max K.](https://github.com/maxknv)).
* Backported in [#68962](https://github.com/ClickHouse/ClickHouse/issues/68962): Fix 2477 timeout. [#68752](https://github.com/ClickHouse/ClickHouse/pull/68752) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#68977](https://github.com/ClickHouse/ClickHouse/issues/68977): Check setting use_json_alias_for_old_object_type in runtime. [#68793](https://github.com/ClickHouse/ClickHouse/pull/68793) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68852](https://github.com/ClickHouse/ClickHouse/issues/68852): Make dynamic structure selection more consistent. [#68802](https://github.com/ClickHouse/ClickHouse/pull/68802) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69052](https://github.com/ClickHouse/ClickHouse/issues/69052): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

View File

@ -6,28 +6,34 @@ sidebar_label: Iceberg
# Iceberg Table Engine
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, and on local storage.
## Create Table
Note that the Iceberg table must already exist in S3, this command does not take DDL parameters to create a new table.
Note that the Iceberg table must already exist in the storage; this command does not take DDL parameters to create a new table.
``` sql
CREATE TABLE iceberg_table
ENGINE = Iceberg(url, [aws_access_key_id, aws_secret_access_key,])
CREATE TABLE iceberg_table_s3
ENGINE = IcebergS3(url, [, NOSIGN | access_key_id, secret_access_key, [session_token]], format, [,compression])
CREATE TABLE iceberg_table_azure
ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])
CREATE TABLE iceberg_table_local
ENGINE = IcebergLocal(path_to_table, [,format] [,compression_method])
```
**Engine parameters**
**Engine arguments**
- `url` — url with the path to an existing Iceberg table.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file.
The description of the arguments coincides with the description of arguments in the `S3`, `AzureBlobStorage` and `File` engines, respectively.
`format` stands for the format of data files in the Iceberg table.
Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
**Example**
```sql
CREATE TABLE iceberg_table ENGINE=Iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
CREATE TABLE iceberg_table ENGINE=IcebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
Using named collections:
@ -45,9 +51,15 @@ Using named collections:
```
```sql
CREATE TABLE iceberg_table ENGINE=Iceberg(iceberg_conf, filename = 'test_table')
CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table')
```
**Aliases**
The table engine `Iceberg` is now an alias for `IcebergS3`.
## See also
- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)

View File

@ -54,7 +54,7 @@ Parameters:
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
line between two points in Euclidean space), or `cosineDistance` (the [cosine
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors).
- `quantization`: either `f32`, `f16`, or `i8` for storing the vector with reduced precision (optional, default: `f32`)
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
- `m`: the number of neighbors per graph node (optional, default: 16)
- `ef_construction`: (optional, default: 128)
- `ef_search`: (optional, default: 64)
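To illustrate how these parameters are supplied, here is a hedged sketch of an index definition; the table is hypothetical, and the optional parameters are assumed to follow the order listed above (`quantization`, `m`, `ef_construction`, `ef_search`):

```sql
CREATE TABLE vectors_example
(
    id UInt64,
    vec Array(Float32),
    -- HNSW index with L2 distance, bf16 quantization, m=16, ef_construction=128, ef_search=64
    INDEX vec_idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'bf16', 16, 128, 64) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
```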

View File

@ -109,6 +109,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` — HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
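For instance, the new virtual column could be inspected like this (a hedged sketch; the table and URL are illustrative):

```sql
CREATE TABLE url_engine_example (a String, b UInt32)
ENGINE = URL('https://example.com/data.csv', CSV);

-- Look up a single response header from the _headers map
SELECT _headers['Content-Type'] AS content_type
FROM url_engine_example
LIMIT 1;
```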
## Storage Settings {#storage-settings}

View File

@ -58,7 +58,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds": "0"}
1
```
@ -472,7 +472,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -668,7 +668,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@ -708,7 +708,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -766,7 +766,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -785,7 +785,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -5633,7 +5633,6 @@ Default value: `1GiB`.
## use_json_alias_for_old_object_type
When enabled, the `JSON` data type alias will be used to create the old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type.
This setting requires server restart to take effect when changed.
Default value: `false`.
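A hedged sketch of the effect (it assumes the old experimental Object type is enabled in the session):

```sql
SET allow_experimental_object_type = 1, use_json_alias_for_old_object_type = 1;

-- With the setting enabled, `JSON` resolves to the old Object('json') type
-- instead of the new JSON type.
CREATE TABLE json_alias_example (data JSON) ENGINE = Memory;
```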

View File

@ -0,0 +1,41 @@
---
slug: /en/operations/system-tables/projections
---
# projections
Contains information about existing projections in all the tables.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Projection name.
- `type` ([Enum](../../sql-reference/data-types/enum.md)) — Projection type ('Normal' = 0, 'Aggregate' = 1).
- `sorting_key` ([Array(String)](../../sql-reference/data-types/array.md)) — Projection sorting key.
- `query` ([String](../../sql-reference/data-types/string.md)) — Projection query.
**Example**
```sql
SELECT * FROM system.projections LIMIT 2 FORMAT Vertical;
```
```text
Row 1:
──────
database: default
table: landing
name: improved_sorting_key
type: Normal
sorting_key: ['user_id','date']
query: SELECT * ORDER BY user_id, date
Row 2:
──────
database: default
table: landing
name: agg_no_key
type: Aggregate
sorting_key: []
query: SELECT count()
```

View File

@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
**Parameters**
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
**Returned values**
@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord
Type: `UInt8`.
<a name="sequence-function-pattern-syntax"></a>
**Pattern syntax**
#### Pattern syntax
- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.
@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)
**Parameters**
- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).
**Returned values**

View File

@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we
### During merges of data parts in MergeTree table engines
During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains
During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after merge and what paths will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contain
the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.
Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:

View File

@ -49,7 +49,7 @@ Result:
## multiIf
Allows to write the [CASE](../../sql-reference/operators/index.md#operator_case) operator more compactly in the query.
Allows to write the [CASE](../../sql-reference/operators/index.md#conditional-expression) operator more compactly in the query.
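For example (a hedged sketch with an illustrative column `x`):

```sql
-- Returns 'medium' for x = 42: conditions are checked in order,
-- and the last argument is the ELSE branch.
SELECT multiIf(x > 100, 'high', x > 10, 'medium', 'low') AS bucket
FROM (SELECT 42 AS x);
```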
**Syntax**
@ -264,4 +264,4 @@ SELECT clamp(1, 2, 3) result, toTypeName(result) type;
┌─result─┬─type────┐
│ 2 │ Float64 │
└────────┴─────────┘
```
```

View File

@ -2019,7 +2019,7 @@ Alias: `dateTrunc`.
`unit` argument is case-insensitive.
- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).
**Returned value**
@ -4287,7 +4287,7 @@ Result:
## fromModifiedJulianDay
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
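For example, Modified Julian Day `58849` corresponds to `2020-01-01`:

```sql
SELECT fromModifiedJulianDay(58849) AS date;
-- 2020-01-01
```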
**Syntax**

View File

@ -49,6 +49,55 @@ SETTINGS cast_keep_nullable = 1
└──────────────────┴─────────────────────┴──────────────────┘
```
## toBool
Converts an input value to a value of type [`Bool`](../data-types/boolean.md). Throws an exception in case of an error.
**Syntax**
```sql
toBool(expr)
```
**Arguments**
- `expr` — Expression returning a number or a string. [Expression](../syntax.md/#syntax-expressions).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- Values of type Float32/64.
- Strings `true` or `false` (case-insensitive).
**Returned value**
- Returns `true` or `false` based on evaluation of the argument. [Bool](../data-types/boolean.md).
**Example**
Query:
```sql
SELECT
toBool(toUInt8(1)),
toBool(toInt8(-1)),
toBool(toFloat32(1.01)),
toBool('true'),
toBool('false'),
toBool('FALSE')
FORMAT Vertical
```
Result:
```response
toBool(toUInt8(1)): true
toBool(toInt8(-1)): true
toBool(toFloat32(1.01)): true
toBool('true'): true
toBool('false'): false
toBool('FALSE'): false
```
## toInt8
Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.

View File

@ -8,7 +8,7 @@ title: "CREATE ROW POLICY"
Creates a [row policy](../../../guides/sre/user-management/index.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
:::tip
Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies.
Row policies make sense only for users with readonly access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
:::
Syntax:
@ -24,40 +24,40 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste
## USING Clause
Allows to specify a condition to filter rows. An user will see a row if the condition is calculated to non-zero for the row.
Allows specifying a condition to filter rows. A user will see a row if the condition is calculated to non-zero for the row.
## TO Clause
In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
In the `TO` section you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
Keyword `ALL` means all the ClickHouse users including current user. Keyword `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
Keyword `ALL` means all the ClickHouse users, including current user. Keyword `ALL EXCEPT` allows excluding some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
:::note
If there are no row policies defined for a table then any user can `SELECT` all the row from the table. Defining one or more row policies for the table makes the access to the table depending on the row policies no matter if those row policies are defined for the current user or not. For example, the following policy
If there are no row policies defined for a table, then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table dependent on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy:
`CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`
forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
If that's not desirable it can't be fixed by adding one more row policy, like the following:
If that's not desirable, it can be fixed by adding one more row policy, like the following:
`CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
:::
## AS Clause
It's allowed to have more than one policy enabled on the same table for the same user at the one time. So we need a way to combine the conditions from multiple policies.
It's allowed to have more than one policy enabled on the same table for the same user at one time. So we need a way to combine the conditions from multiple policies.
By default policies are combined using the boolean `OR` operator. For example, the following policies
By default, policies are combined using the boolean `OR` operator. For example, the following policies:
``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
```
enables the user `peter` to see rows with either `b=1` or `c=2`.
enable the user `peter` to see rows with either `b=1` or `c=2`.
The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default policies are permissive, which means they are combined using the boolean `OR` operator.
The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default, policies are permissive, which means they are combined using the boolean `OR` operator.
A policy can be defined as restrictive as an alternative. Restrictive policies are combined using the boolean `AND` operator.
@ -68,25 +68,25 @@ row_is_visible = (one or more of the permissive policies' conditions are non-zer
(all of the restrictive policies's conditions are non-zero)
```
For example, the following policies
For example, the following policies:
``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```
enables the user `peter` to see rows only if both `b=1` AND `c=2`.
enable the user `peter` to see rows only if both `b=1` AND `c=2`.
Database policies are combined with table policies.
For example, the following policies
For example, the following policies:
``` sql
CREATE ROW POLICY pol1 ON mydb.* USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```
enables the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
enable the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
any other table in mydb would have only `b=1` policy applied for the user.

View File

@ -10,7 +10,7 @@ title: The Lightweight DELETE Statement
The lightweight `DELETE` statement removes rows from the table `[db.]table` that match the expression `expr`. It is only available for the *MergeTree table engine family.
``` sql
DELETE FROM [db.]table [ON CLUSTER cluster] WHERE expr;
DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
```
It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
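As a hedged illustration of the new optional clause (the table, its partitioning, and the filter are hypothetical):

```sql
-- Assumes a table partitioned by toYYYYMM(order_date);
-- only the 2024-09 partition is scanned for matching rows.
DELETE FROM orders IN PARTITION 202409 WHERE status = 'cancelled';
```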

View File

@ -6,35 +6,37 @@ sidebar_label: iceberg
# iceberg Table Function
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, or stored locally.
## Syntax
``` sql
iceberg(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure])
icebergS3(url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,compression_method])
icebergS3(named_collection[, option=value [,..]])
icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzure(named_collection[, option=value [,..]])
icebergLocal(path_to_table, [,format] [,compression_method])
icebergLocal(named_collection[, option=value [,..]])
```
## Arguments
- `url` — Bucket URL with the path to an existing Iceberg table in S3.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. These parameters are optional. If credentials are not specified, they are taken from the ClickHouse configuration. For more information see [Using S3 for Data Storage](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3).
- `format` — The [format](/docs/en/interfaces/formats.md/#formats) of the file. By default `Parquet` is used.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
Engine parameters can be specified using [Named Collections](/docs/en/operations/named-collections.md).
The descriptions of the arguments coincide with the descriptions of the arguments of the table functions `s3`, `azureBlobStorage` and `file`, respectively.
`format` stands for the format of data files in the Iceberg table.
**Returned value**
A table with the specified structure for reading data in the specified Iceberg table in S3.
A table with the specified structure for reading data in the specified Iceberg table.
**Example**
```sql
SELECT * FROM iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
:::important
ClickHouse currently supports reading v1 (v2 support is coming soon!) of the Iceberg format via the `iceberg` table function and `Iceberg` table engine.
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure` and `icebergLocal` table functions and the `IcebergS3`, `IcebergAzure` and `IcebergLocal` table engines.
:::
## Defining a named collection
@ -56,10 +58,14 @@ Here is an example of configuring a named collection for storing the URL and cre
```
```sql
SELECT * FROM iceberg(iceberg_conf, filename = 'test_table')
DESCRIBE iceberg(iceberg_conf, filename = 'test_table')
SELECT * FROM icebergS3(iceberg_conf, filename = 'test_table')
DESCRIBE icebergS3(iceberg_conf, filename = 'test_table')
```
**Aliases**
The table function `iceberg` is now an alias of `icebergS3`.
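For instance, the two queries below are interchangeable (reusing the example bucket from above):
```sql
SELECT * FROM iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test');
SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test');
```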
**See Also**
- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)

View File

@ -54,6 +54,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` - HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
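For example, a sketch of reading the new `_headers` column (the URL is hypothetical):
```sql
SELECT _headers['Content-Type'] AS content_type
FROM url('http://127.0.0.1:8123/?query=SELECT+1', 'LineAsString', 'line String');
```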
## Hive-style partitioning {#hive-style-partitioning}

View File

@ -50,7 +50,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
1
```
@ -367,7 +367,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0", "elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -601,7 +601,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -659,7 +659,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -678,7 +678,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -280,7 +280,7 @@ SYSTEM START REPLICATION QUEUES [ON CLUSTER cluster_name] [[db.]replicated_merge
Waits until the table of the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster, but for no longer than `receive_timeout` seconds:
``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
```
After this query is executed, the `[db.]replicated_merge_tree_family_table_name` table loads commands from the common replicated log into its own replication queue. The query then waits until the replica processes all of the loaded commands. The following modifiers are supported:
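For example, with the new `ON CLUSTER` clause (the cluster and table names are hypothetical):
``` sql
SYSTEM SYNC REPLICA ON CLUSTER my_cluster db.replicated_table LIGHTWEIGHT;
```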

View File

@ -53,7 +53,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
1
```
@ -363,7 +363,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
@ -524,7 +524,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
@ -564,7 +564,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
@ -616,7 +616,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
@ -635,7 +635,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

View File

@ -1157,7 +1157,7 @@ SELECT toModifiedJulianDayOrNull('2020-01-01');
## fromModifiedJulianDay {#frommodifiedjulianday}
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in `YYYY-MM-DD` text format. The function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31, respectively). It raises an exception if the day number is outside the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in `YYYY-MM-DD` text format. The function supports day numbers from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31, respectively). It raises an exception if the day number is outside the supported range.
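For example (Modified Julian Day `58849` corresponds to `2020-01-01`):
```sql
SELECT fromModifiedJulianDay(58849); -- returns '2020-01-01'
```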
**Syntax**

View File

@ -677,4 +677,122 @@ void GetAllChildrenNumberCommand::execute(const ASTKeeperQuery * query, KeeperCl
std::cout << totalNumChildren << "\n";
}
namespace
{
class CPMVOperation
{
constexpr static UInt64 kTryLimit = 1000;
public:
CPMVOperation(String src_, String dest_, bool remove_src_, KeeperClient * client_)
: src(std::move(src_)), dest(std::move(dest_)), remove_src(remove_src_), client(client_)
{
}
bool isTryLimitReached() const { return failed_tries_count >= kTryLimit; }
bool isCompleted() const { return is_completed; }
void perform()
{
Coordination::Stat src_stat;
String data = client->zookeeper->get(src, &src_stat);
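// Perform the copy as a single multi-op transaction: verify the source version is unchanged,
// create the destination, and (for `mv`) remove the source, so it either fully applies or not at all.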
Coordination::Requests ops{
zkutil::makeCheckRequest(src, src_stat.version),
zkutil::makeCreateRequest(dest, data, zkutil::CreateMode::Persistent), // Do we need to copy ACLs here?
};
if (remove_src)
ops.push_back(zkutil::makeRemoveRequest(src, src_stat.version));
Coordination::Responses responses;
auto code = client->zookeeper->tryMulti(ops, responses);
switch (code)
{
case Coordination::Error::ZOK: {
is_completed = true;
return;
}
case Coordination::Error::ZBADVERSION: {
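// The source changed between get() and tryMulti(); retry with fresh data, up to kTryLimit attempts.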
++failed_tries_count;
if (isTryLimitReached())
zkutil::KeeperMultiException::check(code, ops, responses);
return;
}
default:
zkutil::KeeperMultiException::check(code, ops, responses);
}
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable");
}
private:
String src;
String dest;
bool remove_src = false;
KeeperClient * client = nullptr;
bool is_completed = false;
uint64_t failed_tries_count = 0;
};
}
bool CPCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, [[maybe_unused]] Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));
String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));
return true;
}
void CPCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/false, /*client_=*/client);
while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}
bool MVCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String src_path;
if (!parseKeeperPath(pos, expected, src_path))
return false;
node->args.push_back(std::move(src_path));
String to_path;
if (!parseKeeperPath(pos, expected, to_path))
return false;
node->args.push_back(std::move(to_path));
return true;
}
void MVCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/true, /*client_=*/client);
while (!operation.isTryLimitReached() && !operation.isCompleted())
operation.perform();
}
}

View File

@ -266,4 +266,32 @@ class GetAllChildrenNumberCommand : public IKeeperClientCommand
}
};
class CPCommand : public IKeeperClientCommand
{
String getName() const override { return "cp"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} <src> <dest> -- Copies 'src' node to 'dest' path.";
}
};
class MVCommand : public IKeeperClientCommand
{
String getName() const override { return "mv"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} <src> <dest> -- Moves 'src' node to the 'dest' path.";
}
};
}

View File

@ -212,6 +212,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
std::make_shared<FourLetterWordCommand>(),
std::make_shared<GetDirectChildrenNumberCommand>(),
std::make_shared<GetAllChildrenNumberCommand>(),
std::make_shared<CPCommand>(),
std::make_shared<MVCommand>(),
});
String home_path;

View File

@ -978,6 +978,7 @@ try
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
* At this moment, no one could own shared part of Context.
*/
global_context->resetSharedContext();
global_context.reset();
shared_context.reset();
LOG_DEBUG(log, "Destroyed global context.");

View File

@ -68,7 +68,10 @@ public:
if (data().isEqualTo(to.data()))
counter += to.counter;
else if (!data().has() || counter < to.counter)
{
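// The other state dominates: adopt its value; the counter becomes its lead over the current one.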
data().set(to.data(), arena);
counter = to.counter - counter;
}
else
counter -= to.counter;
}

View File

@ -209,7 +209,7 @@ std::map<std::pair<TypeIndex, String>, NodeToSubcolumnTransformer> node_transfor
},
};
std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimization(const QueryTreeNodePtr & node)
std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimization(const QueryTreeNodePtr & node, const ContextPtr & context)
{
auto * function_node = node->as<FunctionNode>();
if (!function_node)
@ -232,6 +232,12 @@ std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimizati
const auto & storage_snapshot = table_node->getStorageSnapshot();
auto column = first_argument_column_node->getColumn();
/// If view source is set we cannot optimize because it doesn't support moving functions to subcolumns.
/// The storage is replaced with the view source, but this happens only after building the query tree and applying passes.
auto view_source = context->getViewSource();
if (view_source && view_source->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
return {};
if (!storage->supportsOptimizationToSubcolumns() || storage->isVirtualColumn(column.name, storage_snapshot->metadata))
return {};
@ -266,7 +272,7 @@ public:
return;
}
auto [function_node, first_argument_node, table_node] = getTypedNodesForOptimization(node);
auto [function_node, first_argument_node, table_node] = getTypedNodesForOptimization(node, getContext());
if (function_node && first_argument_node && table_node)
{
enterImpl(*function_node, *first_argument_node, *table_node);
@ -416,7 +422,7 @@ public:
if (!getSettings().optimize_functions_to_subcolumns)
return;
auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node);
auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node, getContext());
if (!function_node || !first_argument_column_node || !table_node)
return;

View File

@ -100,6 +100,7 @@ protected:
auto buf = BuilderRWBufferFromHTTP(getPingURI())
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
return checkString(PING_OK_ANSWER, *buf);
@ -206,6 +207,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
bool res = false;
@ -232,6 +234,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
std::string character;

View File

@ -111,6 +111,7 @@ add_headers_and_sources(dbms Storages/ObjectStorage)
add_headers_and_sources(dbms Storages/ObjectStorage/Azure)
add_headers_and_sources(dbms Storages/ObjectStorage/S3)
add_headers_and_sources(dbms Storages/ObjectStorage/HDFS)
add_headers_and_sources(dbms Storages/ObjectStorage/Local)
add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes)
add_headers_and_sources(dbms Common/NamedCollections)

View File

@ -34,6 +34,7 @@
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTAuthenticationData.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTUseQuery.h>
@ -2111,6 +2112,15 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
// - Other formats (e.g. FORMAT CSV) are arbitrarily more complex and tricky to parse. For example, we may be unable to distinguish if the semicolon
// is part of the data or ends the statement. In this case, we simply assume that the end of the INSERT statement is determined by \n\n (two newlines).
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
// We also consider the INSERT query in EXPLAIN queries (same as normal INSERT queries)
if (!insert_ast)
{
auto * explain_ast = parsed_query->as<ASTExplainQuery>();
if (explain_ast && explain_ast->getExplainedQuery())
{
insert_ast = explain_ast->getExplainedQuery()->as<ASTInsertQuery>();
}
}
const char * query_to_execute_end = this_query_end;
if (insert_ast && insert_ast->data)
{

View File

@ -300,7 +300,7 @@ void ColumnDynamic::get(size_t n, Field & res) const
auto value_data = shared_variant.getDataAt(variant_col.offsetAt(n));
ReadBufferFromMemory buf(value_data.data, value_data.size);
auto type = decodeDataType(buf);
getVariantSerialization(type)->deserializeBinary(res, buf, getFormatSettings());
type->getDefaultSerialization()->deserializeBinary(res, buf, getFormatSettings());
}
@ -736,8 +736,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, Arena & arena, const
{
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(discr);
encodeDataType(variant_type, buf);
getVariantSerialization(variant_type, variant_info.variant_names[discr])
->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings());
variant_type->getDefaultSerialization()->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings());
type_and_value = buf.str();
}
@ -870,7 +869,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have both values serialized in binary format, so we need to
/// create temporary column, insert both values into it and compare.
auto tmp_column = left_data_type->createColumn();
const auto & serialization = getVariantSerialization(left_data_type, left_data_type_name);
const auto & serialization = left_data_type->getDefaultSerialization();
serialization->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
serialization->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
return tmp_column->compareAt(0, 1, *tmp_column, nan_direction_hint);
@ -892,7 +891,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have left value serialized in binary format, we need to
/// create temporary column, insert the value into it and compare.
auto tmp_column = left_data_type->createColumn();
getVariantSerialization(left_data_type, left_data_type_name)->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
left_data_type->getDefaultSerialization()->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
return tmp_column->compareAt(0, right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint);
}
/// Check if only right value is in shared data.
@ -912,7 +911,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have right value serialized in binary format, we need to
/// create temporary column, insert the value into it and compare.
auto tmp_column = right_data_type->createColumn();
getVariantSerialization(right_data_type, right_data_type_name)->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
right_data_type->getDefaultSerialization()->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), 0, *tmp_column, nan_direction_hint);
}
/// Otherwise both values are regular variants.

View File

@ -414,7 +414,7 @@ public:
/// Insert value into shared variant. Also updates Variant discriminators and offsets.
void insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n);
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name) const
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name)
{
/// Get serialization for provided data type.
/// To avoid calling type->getDefaultSerialization() every time we use simple cache with max size.
@ -428,7 +428,7 @@ public:
return serialization_cache.emplace(variant_name, variant_type->getDefaultSerialization()).first->second;
}
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); }
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) { return getVariantSerialization(variant_type, variant_type->getName()); }
private:
void createVariantInfo(const DataTypePtr & variant_type);
@ -473,7 +473,7 @@ private:
/// We can use serializations of different data types to serialize values into shared variant.
/// To avoid creating the same serialization multiple times, use simple cache.
static const size_t SERIALIZATION_CACHE_MAX_SIZE = 256;
mutable std::unordered_map<String, SerializationPtr> serialization_cache;
std::unordered_map<String, SerializationPtr> serialization_cache;
};
void extendVariantColumn(

View File

@ -75,9 +75,9 @@
M(GlobalThread, "Number of threads in global thread pool.") \
M(GlobalThreadActive, "Number of threads in global thread pool running a task.") \
M(GlobalThreadScheduled, "Number of queued or active jobs in global thread pool.") \
M(LocalThread, "Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Number of queued or active jobs in local thread pools.") \
M(LocalThread, "Obsolete. Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.") \
M(LocalThreadActive, "Obsolete. Number of threads in local thread pools running a task.") \
M(LocalThreadScheduled, "Obsolete. Number of queued or active jobs in local thread pools.") \
M(MergeTreeDataSelectExecutorThreads, "Number of threads in the MergeTreeDataSelectExecutor thread pool.") \
M(MergeTreeDataSelectExecutorThreadsActive, "Number of threads in the MergeTreeDataSelectExecutor thread pool running a task.") \
M(MergeTreeDataSelectExecutorThreadsScheduled, "Number of queued or active jobs in the MergeTreeDataSelectExecutor thread pool.") \
@ -292,6 +292,9 @@
M(DistrCacheWriteRequests, "Number of executed Write requests to Distributed Cache") \
M(DistrCacheServerConnections, "Number of open connections to ClickHouse server from Distributed Cache") \
\
M(SchedulerIOReadScheduled, "Number of IO reads currently being scheduled") \
M(SchedulerIOWriteScheduled, "Number of IO writes currently being scheduled") \
\
M(StorageConnectionsStored, "Total count of sessions stored in the session pool for storages") \
M(StorageConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for storages") \
\

View File

@ -113,6 +113,56 @@ std::string_view CurrentThread::getQueryId()
return current_thread->getQueryId();
}
void CurrentThread::attachReadResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link = link;
}
void CurrentThread::detachReadResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->read_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to read resource", std::to_string(getThreadId()));
current_thread->read_resource_link.reset();
}
ResourceLink CurrentThread::getReadResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->read_resource_link;
}
void CurrentThread::attachWriteResource(ResourceLink link)
{
if (unlikely(!current_thread))
return;
if (current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link = link;
}
void CurrentThread::detachWriteResource()
{
if (unlikely(!current_thread))
return;
if (!current_thread->write_resource_link)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to write resource", std::to_string(getThreadId()));
current_thread->write_resource_link.reset();
}
ResourceLink CurrentThread::getWriteResourceLink()
{
if (unlikely(!current_thread))
return {};
return current_thread->write_resource_link;
}
MemoryTracker * CurrentThread::getUserMemoryTracker()
{
if (unlikely(!current_thread))

View File

@ -2,6 +2,7 @@
#include <Interpreters/Context_fwd.h>
#include <Common/ThreadStatus.h>
#include <Common/Scheduler/ResourceLink.h>
#include <memory>
#include <string>
@ -23,7 +24,6 @@ class QueryStatus;
struct Progress;
class InternalTextLogsQueue;
/** Collection of static methods to work with thread-local objects.
* Allows to attach and detach query/process (thread group) to a thread
* (to calculate query-related metrics and to allow to obtain query-related data from a thread).
@ -92,6 +92,14 @@ public:
static std::string_view getQueryId();
// For IO Scheduling
static void attachReadResource(ResourceLink link);
static void detachReadResource();
static ResourceLink getReadResourceLink();
static void attachWriteResource(ResourceLink link);
static void detachWriteResource();
static ResourceLink getWriteResourceLink();
/// Initializes query with current thread as master thread in constructor, and detaches it in destructor
struct QueryScope : private boost::noncopyable
{
@ -102,6 +110,39 @@ public:
void logPeakMemoryUsage();
bool log_peak_memory_usage_in_destructor = true;
};
/// Scoped attach/detach of IO resource links
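/// Usage sketch (hypothetical): `CurrentThread::IOScope io(settings);` attaches the links for the
/// scope's lifetime, so HTTP sessions created by this thread pick them up in sendRequest().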
struct IOScope : private boost::noncopyable
{
explicit IOScope(ResourceLink read_resource_link, ResourceLink write_resource_link)
{
if (read_resource_link)
{
attachReadResource(read_resource_link);
read_attached = true;
}
if (write_resource_link)
{
attachWriteResource(write_resource_link);
write_attached = true;
}
}
explicit IOScope(const IOSchedulingSettings & settings)
: IOScope(settings.read_resource_link, settings.write_resource_link)
{}
~IOScope()
{
if (read_attached)
detachReadResource();
if (write_attached)
detachWriteResource();
}
bool read_attached = false;
bool write_attached = false;
};
};
}

View File

@ -2,6 +2,7 @@
#include <Common/HostResolvePool.h>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
#include <Common/CurrentMetrics.h>
#include <Common/logger_useful.h>
#include <Common/Exception.h>
@ -9,6 +10,7 @@
#include <Common/ProxyConfiguration.h>
#include <Common/MemoryTrackerSwitcher.h>
#include <Common/SipHash.h>
#include <Common/Scheduler/ResourceGuard.h>
#include <Common/proxyConfigurationToPocoProxyConfig.h>
#include <Poco/Net/HTTPChunkedStream.h>
@ -236,6 +238,59 @@ public:
};
// Session data hooks implementation for integration with resource scheduler.
// Hooks are created per every request-response pair and are registered/unregistered in HTTP session.
// * `atStart()` sends a resource request to the scheduler every time the HTTP session is going to send or receive
// data to/from the socket, and waits for the scheduler's confirmation. This way the scheduler may
// throttle and/or schedule socket data streams.
// * `atFinish()` hook is called on a successful socket read/write operation.
// It informs the scheduler that the operation is complete, which allows the scheduler to control the total
// amount of in-flight bytes and/or operations.
// * `atFail()` hook is called on failure of a socket operation. The purpose is to correct the amount of bytes
// passed through the scheduler queue to ensure fair bandwidth allocation even in the presence of errors.
struct ResourceGuardSessionDataHooks : public Poco::Net::IHTTPSessionDataHooks
{
ResourceGuardSessionDataHooks(ResourceLink link_, const ResourceGuard::Metrics * metrics, LoggerPtr log_, const String & method, const String & uri)
: link(link_)
, log(log_)
, http_request(method + " " + uri)
{
request.metrics = metrics;
chassert(link);
}
~ResourceGuardSessionDataHooks() override
{
request.assertFinished(); // Never destruct with an active request
}
void atStart(int bytes) override
{
Stopwatch timer;
request.enqueue(bytes, link);
request.wait();
timer.stop();
if (timer.elapsedMilliseconds() >= 5000)
LOG_INFO(log, "Resource request took too long to finish: {} ms for {}", timer.elapsedMilliseconds(), http_request);
}
void atFinish(int bytes) override
{
request.finish(bytes, link);
}
void atFail() override
{
request.finish(0, link);
}
ResourceLink link;
ResourceGuard::Request request;
LoggerPtr log;
String http_request;
};
// EndpointConnectionPool manages connections to the endpoint
// Features:
// - it uses HostResolver for address selecting. See Common/HostResolver.h for more info.
@ -246,8 +301,6 @@ public:
// - `Session::reconnect()` uses the pool as well
// - comprehensive sensors
// - a session is reused according to its inner state, automatically
template <class Session>
class EndpointConnectionPool : public std::enable_shared_from_this<EndpointConnectionPool<Session>>, public IExtendedPool
{
@ -337,6 +390,13 @@ private:
std::ostream & sendRequest(Poco::Net::HTTPRequest & request) override
{
auto idle = idleTime();
// Set data hooks for IO scheduling
if (ResourceLink link = CurrentThread::getReadResourceLink())
Session::setReceiveDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIORead(), log, request.getMethod(), request.getURI()));
if (ResourceLink link = CurrentThread::getWriteResourceLink())
Session::setSendDataHooks(std::make_shared<ResourceGuardSessionDataHooks>(link, ResourceGuard::Metrics::getIOWrite(), log, request.getMethod(), request.getURI()));
std::ostream & result = Session::sendRequest(request);
result.exceptions(std::ios::badbit);
@ -393,6 +453,8 @@ private:
}
}
response_stream = nullptr;
Session::setSendDataHooks();
Session::setReceiveDataHooks();
group->atConnectionDestroy();

View File

@ -86,6 +86,20 @@
M(NetworkReceiveBytes, "Total number of bytes received from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
M(NetworkSendBytes, "Total number of bytes sent to network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
\
M(GlobalThreadPoolExpansions, "Counts the total number of times new threads have been added to the global thread pool. This metric indicates the frequency of expansions in the global thread pool to accommodate increased processing demands.") \
M(GlobalThreadPoolShrinks, "Counts the total number of times the global thread pool has shrunk by removing threads. This occurs when the number of idle threads exceeds max_thread_pool_free_size, indicating adjustments in the global thread pool size in response to decreased thread utilization.") \
M(GlobalThreadPoolThreadCreationMicroseconds, "Total time spent waiting for new threads to start.") \
M(GlobalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the global thread pool.") \
M(GlobalThreadPoolJobs, "Counts the number of jobs that have been pushed to the global thread pool.") \
M(GlobalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
M(LocalThreadPoolExpansions, "Counts the total number of times threads have been borrowed from the global thread pool to expand local thread pools.") \
M(LocalThreadPoolShrinks, "Counts the total number of times threads have been returned to the global thread pool from local thread pools.") \
M(LocalThreadPoolThreadCreationMicroseconds, "Total time local thread pools have spent waiting to borrow a thread from the global pool.") \
M(LocalThreadPoolLockWaitMicroseconds, "Total time threads have spent waiting for locks in the local thread pools.") \
M(LocalThreadPoolJobs, "Counts the number of jobs that have been pushed to the local thread pools.") \
M(LocalThreadPoolBusyMicroseconds, "Total time threads have spent executing the actual work.") \
M(LocalThreadPoolJobWaitTimeMicroseconds, "Measures the elapsed time from when a job is scheduled in the thread pool to when it is picked up for execution by a worker thread. This metric helps identify delays in job processing, indicating the responsiveness of the thread pool to new tasks.") \
\
M(DiskS3GetRequestThrottlerCount, "Number of DiskS3 GET and SELECT requests passed through throttler.") \
M(DiskS3GetRequestThrottlerSleepMicroseconds, "Total time a query was sleeping to conform DiskS3 GET and SELECT request throttling.") \
M(DiskS3PutRequestThrottlerCount, "Number of DiskS3 PUT, COPY, POST and LIST requests passed through throttler.") \
@ -106,6 +120,13 @@
M(PartsWithAppliedMutationsOnFly, "Total number of parts for which there was any mutation applied on fly") \
M(MutationsAppliedOnFlyInAllParts, "The sum of number of applied mutations on-fly for part among all read parts") \
\
M(SchedulerIOReadRequests, "Resource requests passed through scheduler for IO reads.") \
M(SchedulerIOReadBytes, "Bytes passed through scheduler for IO reads.") \
M(SchedulerIOReadWaitMicroseconds, "Total time a query was waiting on resource requests for IO reads.") \
M(SchedulerIOWriteRequests, "Resource requests passed through scheduler for IO writes.") \
M(SchedulerIOWriteBytes, "Bytes passed through scheduler for IO writes.") \
M(SchedulerIOWriteWaitMicroseconds, "Total time a query was waiting on resource requests for IO writes.") \
\
M(QueryMaskingRulesMatch, "Number of times query masking rules was successfully matched.") \
\
M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \

View File

@ -34,13 +34,16 @@ bool ProgressIndication::updateProgress(const Progress & value)
void ProgressIndication::resetProgress()
{
watch.restart();
progress.reset();
show_progress_bar = false;
written_progress_chars = 0;
write_progress_on_update = false;
{
std::lock_guard lock(progress_mutex);
progress.reset();
show_progress_bar = false;
written_progress_chars = 0;
write_progress_on_update = false;
}
{
std::lock_guard lock(profile_events_mutex);
watch.restart();
cpu_usage_meter.reset(getElapsedNanoseconds());
hosts_data.clear();
}
@ -90,6 +93,8 @@ ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const
void ProgressIndication::writeFinalProgress()
{
std::lock_guard lock(progress_mutex);
if (progress.read_rows < 1000)
return;
@ -271,6 +276,8 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
void ProgressIndication::clearProgressOutput(WriteBufferFromFileDescriptor & message)
{
std::lock_guard lock(progress_mutex);
if (written_progress_chars)
{
written_progress_chars = 0;

View File

@ -115,6 +115,8 @@ private:
/// It is possible concurrent access to the following:
/// - writeProgress() (class properties) (guarded with progress_mutex)
/// - hosts_data/cpu_usage_meter (guarded with profile_events_mutex)
///
/// It is also possible to have more races if the query is cancelled, in which case clearProgressOutput() is called concurrently
mutable std::mutex profile_events_mutex;
mutable std::mutex progress_mutex;

View File

@ -22,10 +22,13 @@ public:
{}
// Wrapper for `enqueueRequest()` that should be used to account for available resource budget
void enqueueRequestUsingBudget(ResourceRequest * request)
// Returns `estimated_cost` that should be passed later to `adjustBudget()`
[[ nodiscard ]] ResourceCost enqueueRequestUsingBudget(ResourceRequest * request)
{
request->cost = budget.ask(request->cost);
ResourceCost estimated_cost = request->cost;
request->cost = budget.ask(estimated_cost);
enqueueRequest(request);
return estimated_cost;
}
// Should be called to account for difference between real and estimated costs
@ -34,18 +37,6 @@ public:
budget.adjust(estimated_cost, real_cost);
}
// Adjust budget to account for extra consumption of `cost` resource units
void consumeBudget(ResourceCost cost)
{
adjustBudget(0, cost);
}
// Adjust budget to account for requested, but not consumed `cost` resource units
void accumulateBudget(ResourceCost cost)
{
adjustBudget(cost, 0);
}
/// Enqueue new request to be executed using underlying resource.
/// Should be called outside of scheduling subsystem, implementation must be thread-safe.
virtual void enqueueRequest(ResourceRequest * request) = 0;

View File

@ -232,12 +232,13 @@ struct ResourceTestManager : public ResourceTestBase
ResourceTestManager & t;
Guard(ResourceTestManager & t_, ResourceLink link_, ResourceCost cost)
: ResourceGuard(link_, cost, PostponeLocking)
: ResourceGuard(ResourceGuard::Metrics::getIOWrite(), link_, cost, Lock::Defer)
, t(t_)
{
t.onEnqueue(link);
lock();
t.onExecute(link);
consume(cost);
}
};
@ -310,8 +311,9 @@ struct ResourceTestManager : public ResourceTestBase
// NOTE: actually leader's request(s) make their own small busy period.
void blockResource(ResourceLink link)
{
ResourceGuard g(link, 1, ResourceGuard::PostponeLocking);
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, 1, ResourceGuard::Lock::Defer);
g.lock();
g.consume(1);
// NOTE: at this point we assume resource to be blocked by single request (<max_requests>1</max_requests>)
busy_period.arrive_and_wait(); // (1) notify all followers that resource is blocked
busy_period.arrive_and_wait(); // (2) wait all followers to enqueue their requests
@ -320,10 +322,11 @@ struct ResourceTestManager : public ResourceTestBase
{
getLinkData(link).left += total_requests + 1;
busy_period.arrive_and_wait(); // (1) wait leader to block resource
ResourceGuard g(link, cost, ResourceGuard::PostponeLocking);
ResourceGuard g(ResourceGuard::Metrics::getIOWrite(), link, cost, ResourceGuard::Lock::Defer);
onEnqueue(link);
busy_period.arrive_and_wait(); // (2) notify leader to unblock
g.lock();
g.consume(cost);
onExecute(link);
}
};

View File

@ -36,11 +36,16 @@ TEST(SchedulerDynamicResourceManager, Smoke)
for (int i = 0; i < 10; i++)
{
ResourceGuard gA(cA->get("res1"), ResourceGuard::PostponeLocking);
ResourceGuard gA(ResourceGuard::Metrics::getIOWrite(), cA->get("res1"), 1, ResourceGuard::Lock::Defer);
gA.lock();
gA.consume(1);
gA.unlock();
ResourceGuard gB(cB->get("res1"));
ResourceGuard gB(ResourceGuard::Metrics::getIOWrite(), cB->get("res1"));
gB.unlock();
ResourceGuard gC(ResourceGuard::Metrics::getIORead(), cB->get("res1"));
gC.consume(2);
}
}

View File

@ -1,11 +1,13 @@
#include <gtest/gtest.h>
#include <Common/Scheduler/SchedulerRoot.h>
#include <Common/Scheduler/Nodes/tests/ResourceTest.h>
#include <Common/Scheduler/SchedulerRoot.h>
#include <Common/randomSeed.h>
#include <barrier>
#include <future>
#include <pcg_random.hpp>
using namespace DB;
@ -22,6 +24,17 @@ struct ResourceTest : public ResourceTestBase
{
scheduler.stop(true);
}
std::mutex rng_mutex;
pcg64 rng{randomSeed()};
template <typename T>
T randomInt(T from, T to)
{
std::uniform_int_distribution<T> distribution(from, to);
std::lock_guard lock(rng_mutex);
return distribution(rng);
}
};
struct ResourceHolder
@ -109,26 +122,55 @@ TEST(SchedulerRoot, Smoke)
r2.registerResource();
{
ResourceGuard rg(a);
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a);
EXPECT_TRUE(fc1->requests.contains(&rg.request));
rg.consume(1);
}
{
ResourceGuard rg(b);
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), b);
EXPECT_TRUE(fc1->requests.contains(&rg.request));
rg.consume(1);
}
{
ResourceGuard rg(c);
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), c);
EXPECT_TRUE(fc2->requests.contains(&rg.request));
rg.consume(1);
}
{
ResourceGuard rg(d);
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), d);
EXPECT_TRUE(fc2->requests.contains(&rg.request));
rg.consume(1);
}
}
TEST(SchedulerRoot, Budget)
{
ResourceTest t;
ResourceHolder r1(t);
r1.add<ConstraintTest>("/", "<max_requests>1</max_requests>");
r1.add<PriorityPolicy>("/prio");
auto a = r1.addQueue("/prio/A", "");
r1.registerResource();
ResourceCost total_real_cost = 0;
int total_requests = 10;
for (int i = 0 ; i < total_requests; i++)
{
ResourceCost est_cost = t.randomInt(1, 10);
ResourceCost real_cost = t.randomInt(0, 10);
ResourceGuard rg(ResourceGuard::Metrics::getIOWrite(), a, est_cost);
rg.consume(real_cost);
total_real_cost += real_cost;
}
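// dequeued_cost accumulates estimated costs, while the queue budget stores what was requested
// but not actually consumed, so subtracting the budget recovers the total real cost.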
EXPECT_EQ(total_requests, a.queue->dequeued_requests);
EXPECT_EQ(total_real_cost, a.queue->dequeued_cost - a.queue->getBudget());
}
TEST(SchedulerRoot, Cancel)
{
ResourceTest t;

View File

@ -1,25 +0,0 @@
#include <Common/Scheduler/ISchedulerQueue.h>
#include <Common/Scheduler/ResourceLink.h>
#include <Common/Scheduler/ResourceRequest.h>
namespace DB
{
void ResourceLink::adjust(ResourceCost estimated_cost, ResourceCost real_cost) const
{
if (queue)
queue->adjustBudget(estimated_cost, real_cost);
}
void ResourceLink::consumed(ResourceCost cost) const
{
if (queue)
queue->consumeBudget(cost);
}
void ResourceLink::accumulate(DB::ResourceCost cost) const
{
if (queue)
queue->accumulateBudget(cost);
}
}

View File

@ -7,10 +7,30 @@
#include <Common/Scheduler/ResourceRequest.h>
#include <Common/Scheduler/ResourceLink.h>
#include <Common/CurrentThread.h>
#include <Common/ProfileEvents.h>
#include <Common/CurrentMetrics.h>
#include <condition_variable>
#include <mutex>
namespace ProfileEvents
{
extern const Event SchedulerIOReadRequests;
extern const Event SchedulerIOReadBytes;
extern const Event SchedulerIOReadWaitMicroseconds;
extern const Event SchedulerIOWriteRequests;
extern const Event SchedulerIOWriteBytes;
extern const Event SchedulerIOWriteWaitMicroseconds;
}
namespace CurrentMetrics
{
extern const Metric SchedulerIOReadScheduled;
extern const Metric SchedulerIOWriteScheduled;
}
namespace DB
{
@ -22,12 +42,42 @@ namespace DB
class ResourceGuard
{
public:
enum ResourceGuardCtor
enum class Lock
{
LockStraightAway, /// Locks inside constructor (default)
Default, /// Locks inside constructor
// WARNING: Only for tests. It is not exception-safe because `lock()` must be called after construction.
PostponeLocking /// Don't lock in constructor, but send request
Defer /// Don't lock in constructor, but send request
};
struct Metrics
{
const ProfileEvents::Event requests = ProfileEvents::end();
const ProfileEvents::Event cost = ProfileEvents::end();
const ProfileEvents::Event wait_microseconds = ProfileEvents::end();
const CurrentMetrics::Metric scheduled_count = CurrentMetrics::end();
static const Metrics * getIORead()
{
static Metrics metrics{
.requests = ProfileEvents::SchedulerIOReadRequests,
.cost = ProfileEvents::SchedulerIOReadBytes,
.wait_microseconds = ProfileEvents::SchedulerIOReadWaitMicroseconds,
.scheduled_count = CurrentMetrics::SchedulerIOReadScheduled
};
return &metrics;
}
static const Metrics * getIOWrite()
{
static Metrics metrics{
.requests = ProfileEvents::SchedulerIOWriteRequests,
.cost = ProfileEvents::SchedulerIOWriteBytes,
.wait_microseconds = ProfileEvents::SchedulerIOWriteWaitMicroseconds,
.scheduled_count = CurrentMetrics::SchedulerIOWriteScheduled
};
return &metrics;
}
};
enum RequestState
@ -46,60 +96,74 @@ public:
chassert(state == Finished);
state = Enqueued;
ResourceRequest::reset(cost_);
link_.queue->enqueueRequestUsingBudget(this);
estimated_cost = link_.queue->enqueueRequestUsingBudget(this); // NOTE: it modifies `cost` and enqueues request
}
// This function is executed inside scheduler thread and wakes thread issued this `request`.
// That thread will continue execution and do real consumption of requested resource synchronously.
void execute() override
{
{
std::unique_lock lock(mutex);
chassert(state == Enqueued);
state = Dequeued;
}
std::unique_lock lock(mutex);
chassert(state == Enqueued);
state = Dequeued;
dequeued_cv.notify_one();
}
void wait()
{
CurrentMetrics::Increment scheduled(metrics->scheduled_count);
auto timer = CurrentThread::getProfileEvents().timer(metrics->wait_microseconds);
std::unique_lock lock(mutex);
dequeued_cv.wait(lock, [this] { return state == Dequeued; });
}
void finish()
void finish(ResourceCost real_cost_, ResourceLink link_)
{
// lock(mutex) is not required because `Dequeued` request cannot be used by the scheduler thread
chassert(state == Dequeued);
state = Finished;
if (estimated_cost != real_cost_)
link_.queue->adjustBudget(estimated_cost, real_cost_);
ResourceRequest::finish();
ProfileEvents::increment(metrics->requests);
ProfileEvents::increment(metrics->cost, real_cost_);
}
static Request & local()
void assertFinished()
{
// lock(mutex) is not required because `Finished` request cannot be used by the scheduler thread
chassert(state == Finished);
}
static Request & local(const Metrics * metrics)
{
// Since single thread cannot use more than one resource request simultaneously,
// we can reuse thread-local request to avoid allocations
static thread_local Request instance;
instance.metrics = metrics;
return instance;
}
const Metrics * metrics = nullptr; // Must be initialized before use
private:
ResourceCost estimated_cost = 0; // Stores initial `cost` value in case budget was used to modify it
std::mutex mutex;
std::condition_variable dequeued_cv;
RequestState state = Finished;
};
/// Creates pending request for resource; blocks while resource is not available (unless `PostponeLocking`)
explicit ResourceGuard(ResourceLink link_, ResourceCost cost = 1, ResourceGuardCtor ctor = LockStraightAway)
/// Creates pending request for resource; blocks while resource is not available (unless `Lock::Defer`)
explicit ResourceGuard(const Metrics * metrics, ResourceLink link_, ResourceCost cost = 1, ResourceGuard::Lock type = ResourceGuard::Lock::Default)
: link(link_)
, request(Request::local())
, request(Request::local(metrics))
{
if (cost == 0)
link.queue = nullptr; // Ignore zero-cost requests
else if (link.queue)
link.reset(); // Ignore zero-cost requests
else if (link)
{
request.enqueue(cost, link);
if (ctor == LockStraightAway)
if (type == Lock::Default)
request.wait();
}
}
@ -112,22 +176,29 @@ public:
/// Blocks until resource is available
void lock()
{
if (link.queue)
if (link)
request.wait();
}
/// Report resource consumption has finished
void unlock()
void consume(ResourceCost cost)
{
if (link.queue)
real_cost += cost;
}
/// Report resource consumption has finished
void unlock(ResourceCost consumed = 0)
{
consume(consumed);
if (link)
{
request.finish();
link.queue = nullptr;
request.finish(real_cost, link);
link.reset();
}
}
ResourceLink link;
Request & request;
ResourceCost real_cost = 0;
};
}

View File

@ -13,13 +13,28 @@ using ResourceCost = Int64;
struct ResourceLink
{
ISchedulerQueue * queue = nullptr;
bool operator==(const ResourceLink &) const = default;
explicit operator bool() const { return queue != nullptr; }
void adjust(ResourceCost estimated_cost, ResourceCost real_cost) const;
void reset()
{
queue = nullptr;
}
};
void consumed(ResourceCost cost) const;
/*
* Everything required for IO scheduling.
* Note that raw pointers are stored inside, so make sure that the `ClassifierPtr` that produced
* resource links will outlive them. Usually classifier is stored in query `Context`.
*/
struct IOSchedulingSettings
{
ResourceLink read_resource_link;
ResourceLink write_resource_link;
void accumulate(ResourceCost cost) const;
bool operator==(const IOSchedulingSettings &) const = default;
explicit operator bool() const { return read_resource_link && write_resource_link; }
};
}

View File

@ -45,7 +45,7 @@ constexpr ResourceCost ResourceCostMax = std::numeric_limits<int>::max();
class ResourceRequest : public boost::intrusive::list_base_hook<>
{
public:
/// Cost of request execution; should be filled before request enqueueing.
/// Cost of request execution; should be filled before request enqueueing and remain constant until `finish()`.
/// NOTE: If cost is not known in advance, ResourceBudget should be used (note that every ISchedulerQueue has it)
ResourceCost cost;

View File

@ -13,6 +13,7 @@
#include <IO/ReadHelpers.h>
#include <Interpreters/Context.h>
#include <Core/Settings.h>
#include <Poco/Environment.h>
#pragma clang diagnostic ignored "-Wreserved-identifier"
@ -371,8 +372,8 @@ try
/// in case of double fault.
LOG_FATAL(log, "########## Short fault info ############");
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {}",
VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH,
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}, architecture: {}) (from thread {}) Received signal {}",
VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, Poco::Environment::osArchitecture(),
thread_num, sig);
std::string signal_description = "Unknown signal";

View File

@ -1,4 +1,5 @@
#include <Common/ThreadPool.h>
#include <Common/ProfileEvents.h>
#include <Common/setThreadName.h>
#include <Common/Exception.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
@ -27,6 +28,25 @@ namespace CurrentMetrics
extern const Metric GlobalThreadScheduled;
}
namespace ProfileEvents
{
extern const Event GlobalThreadPoolExpansions;
extern const Event GlobalThreadPoolShrinks;
extern const Event GlobalThreadPoolThreadCreationMicroseconds;
extern const Event GlobalThreadPoolLockWaitMicroseconds;
extern const Event GlobalThreadPoolJobs;
extern const Event GlobalThreadPoolJobWaitTimeMicroseconds;
extern const Event LocalThreadPoolExpansions;
extern const Event LocalThreadPoolShrinks;
extern const Event LocalThreadPoolThreadCreationMicroseconds;
extern const Event LocalThreadPoolLockWaitMicroseconds;
extern const Event LocalThreadPoolJobs;
extern const Event LocalThreadPoolBusyMicroseconds;
extern const Event LocalThreadPoolJobWaitTimeMicroseconds;
}
class JobWithPriority
{
public:
@ -40,6 +60,7 @@ public:
/// Call stacks of all jobs' schedulings leading to this one
std::vector<StackTrace::FramePointers> frame_pointers;
bool enable_job_stack_trace = false;
Stopwatch job_create_time;
JobWithPriority(
Job job_, Priority priority_, CurrentMetrics::Metric metric,
@ -59,6 +80,13 @@ public:
{
return priority > rhs.priority; // Reversed for `priority_queue` max-heap to yield minimum value (i.e. highest priority) first
}
UInt64 elapsedMicroseconds() const
{
return job_create_time.elapsedMicroseconds();
}
};
static constexpr auto DEFAULT_THREAD_NAME = "ThreadPool";
@ -180,14 +208,18 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
};
{
Stopwatch watch;
std::unique_lock lock(mutex);
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
watch.elapsedMicroseconds());
if (CannotAllocateThreadFaultInjector::injectFault())
return on_error("fault injected");
auto pred = [this] { return !queue_size || scheduled_jobs < queue_size || shutdown; };
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set and the value is zero.
if (wait_microseconds) /// Check for optional. Condition is true if the optional is set. Even if the value is zero.
{
if (!job_finished.wait_for(lock, std::chrono::microseconds(*wait_microseconds), pred))
return on_error(fmt::format("no free thread (timeout={})", *wait_microseconds));
@ -216,7 +248,13 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
try
{
Stopwatch watch2;
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
watch2.elapsedMicroseconds());
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
}
catch (...)
{
@ -239,6 +277,8 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, Priority priority, std:
/// Wake up a free thread to run the new job.
new_job_or_shutdown.notify_one();
ProfileEvents::increment(std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobs : ProfileEvents::LocalThreadPoolJobs);
return static_cast<ReturnType>(true);
}
@ -262,7 +302,14 @@ void ThreadPoolImpl<Thread>::startNewThreadsNoLock()
try
{
Stopwatch watch;
threads.front() = Thread([this, it = threads.begin()] { worker(it); });
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolThreadCreationMicroseconds : ProfileEvents::LocalThreadPoolThreadCreationMicroseconds,
watch.elapsedMicroseconds());
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolExpansions : ProfileEvents::LocalThreadPoolExpansions);
}
catch (...)
{
@ -293,7 +340,11 @@ void ThreadPoolImpl<Thread>::scheduleOrThrow(Job job, Priority priority, uint64_
template <typename Thread>
void ThreadPoolImpl<Thread>::wait()
{
Stopwatch watch;
std::unique_lock lock(mutex);
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
watch.elapsedMicroseconds());
/// Signal here just in case.
/// If threads are waiting on condition variables, but there are some jobs in the queue
/// then it will prevent us from deadlock.
@ -334,7 +385,11 @@ void ThreadPoolImpl<Thread>::finalize()
/// Wait for all currently running jobs to finish (we don't wait for all scheduled jobs here like the function wait() does).
for (auto & thread : threads)
{
thread.join();
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
}
threads.clear();
}
@ -391,7 +446,11 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
std::optional<JobWithPriority> job_data;
{
Stopwatch watch;
std::unique_lock lock(mutex);
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolLockWaitMicroseconds : ProfileEvents::LocalThreadPoolLockWaitMicroseconds,
watch.elapsedMicroseconds());
// Finish with previous job if any
if (job_is_done)
@ -424,6 +483,8 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
{
thread_it->detach();
threads.erase(thread_it);
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolShrinks : ProfileEvents::LocalThreadPoolShrinks);
}
return;
}
@ -433,6 +494,10 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
job_data = std::move(const_cast<JobWithPriority &>(jobs.top()));
jobs.pop();
ProfileEvents::increment(
std::is_same_v<Thread, std::thread> ? ProfileEvents::GlobalThreadPoolJobWaitTimeMicroseconds : ProfileEvents::LocalThreadPoolJobWaitTimeMicroseconds,
job_data->elapsedMicroseconds());
/// We don't run jobs after `shutdown` is set, but we have to properly dequeue all jobs and finish them.
if (shutdown)
{
@ -459,7 +524,22 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
CurrentMetrics::Increment metric_active_pool_threads(metric_active_threads);
job_data->job();
if constexpr (!std::is_same_v<Thread, std::thread>)
{
Stopwatch watch;
job_data->job();
// This metric is less relevant for the global thread pool, as it would show large values (time while
// a thread was used by local pools) and increment only when local pools are destroyed.
//
// In cases where global pool threads are used directly (without a local thread pool), distinguishing
// them is difficult.
ProfileEvents::increment(ProfileEvents::LocalThreadPoolBusyMicroseconds, watch.elapsedMicroseconds());
}
else
{
job_data->job();
}
if (thread_trace_context.root_span.isTraceEnabled())
{

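Illustrative sketch only (not part of this diff): the new thread pool counters declared above are ordinary profile events, so they can be inspected through the standard system.events table, for example:

SELECT event, value
FROM system.events
WHERE event LIKE '%ThreadPool%'
ORDER BY event;
-- Per-query values also appear in the ProfileEvents column of system.query_log.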
View File

@ -131,7 +131,7 @@ private:
bool threads_remove_themselves = true;
const bool shutdown_on_exception = true;
boost::heap::priority_queue<JobWithPriority> jobs;
boost::heap::priority_queue<JobWithPriority,boost::heap::stable<true>> jobs;
std::list<Thread> threads;
std::exception_ptr first_exception;
std::stack<OnDestroyCallback> on_destroy_callbacks;

View File

@ -7,11 +7,11 @@
#include <Common/MemoryTracker.h>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
#include <Common/Scheduler/ResourceLink.h>
#include <boost/noncopyable.hpp>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_set>
@ -188,6 +188,10 @@ public:
Progress progress_in;
Progress progress_out;
/// IO scheduling
ResourceLink read_resource_link;
ResourceLink write_resource_link;
private:
/// Group of threads, to which this thread attached
ThreadGroupPtr thread_group;

View File

@ -210,7 +210,7 @@ namespace
{
UInt64 stringToMaxThreads(const String & str)
{
if (startsWith(str, "auto"))
if (startsWith(str, "auto") || startsWith(str, "'auto"))
return 0;
return parseFromString<UInt64>(str);
}
@ -237,7 +237,8 @@ SettingFieldMaxThreads & SettingFieldMaxThreads::operator=(const Field & f)
String SettingFieldMaxThreads::toString() const
{
if (is_auto)
return "auto(" + ::DB::toString(value) + ")";
/// Removing quotes here will introduce an incompatibility between replicas with different versions.
return "'auto(" + ::DB::toString(value) + ")'";
else
return ::DB::toString(value);
}

View File

@ -22,7 +22,6 @@
#include <cstring>
#include <unistd.h>
#include <algorithm>
#include <typeinfo>
#include <iostream>
#include <memory>

View File

@ -185,7 +185,7 @@ std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnDa
auto type = decodeDataType(buf);
if (type->getName() == subcolumn_type_name)
{
dynamic_column.getVariantSerialization(subcolumn_type, subcolumn_type_name)->deserializeBinary(*subcolumn, buf, format_settings);
subcolumn_type->getDefaultSerialization()->deserializeBinary(*subcolumn, buf, format_settings);
null_map.push_back(0);
}
else

View File

@ -1,10 +1,12 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypeObjectDeprecated.h>
#include <DataTypes/Serializations/SerializationJSON.h>
#include <DataTypes/Serializations/SerializationObjectTypedPath.h>
#include <DataTypes/Serializations/SerializationObjectDynamicPath.h>
#include <DataTypes/Serializations/SerializationSubObject.h>
#include <Columns/ColumnObject.h>
#include <Common/CurrentThread.h>
#include <Parsers/IAST.h>
#include <Parsers/ASTLiteral.h>
@ -513,13 +515,24 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject::
static DataTypePtr createJSON(const ASTPtr & arguments)
{
auto context = CurrentThread::getQueryContext();
if (!context)
context = Context::getGlobalContextInstance();
if (context->getSettingsRef().allow_experimental_object_type && context->getSettingsRef().use_json_alias_for_old_object_type)
{
if (arguments && !arguments->children.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set settings allow_experimental_json_type = 1 and use_json_alias_for_old_object_type = 0");
return std::make_shared<DataTypeObjectDeprecated>("JSON", false);
}
return createObject(arguments, DataTypeObject::SchemaFormat::JSON);
}
void registerDataTypeJSON(DataTypeFactory & factory)
{
if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type)
factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive);
factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive);
}
}

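Illustrative sketch only (table names are hypothetical; the setting names are the ones referenced in the code above): with this registration logic, the `JSON` name resolves either to the deprecated Object type or to the new JSON type depending on the session settings:

-- Old-style alias: `JSON` resolves to the deprecated Object type.
SET allow_experimental_object_type = 1, use_json_alias_for_old_object_type = 1;
CREATE TABLE t_old (data JSON) ENGINE = Memory;

-- Otherwise `JSON` resolves to the new JSON type.
SET allow_experimental_json_type = 1, use_json_alias_for_old_object_type = 0;
CREATE TABLE t_new (data JSON) ENGINE = Memory;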
View File

@ -78,10 +78,6 @@ static DataTypePtr create(const ASTPtr & arguments)
void registerDataTypeObjectDeprecated(DataTypeFactory & factory)
{
factory.registerDataType("Object", create);
if (Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type)
factory.registerSimpleDataType("JSON",
[] { return std::make_shared<DataTypeObjectDeprecated>("JSON", false); },
DataTypeFactory::Case::Insensitive);
}
}

View File

@ -489,9 +489,8 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu
}
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(global_discr);
const auto & variant_type_name = variant_info.variant_names[global_discr];
encodeDataType(variant_type, ostr);
dynamic_column.getVariantSerialization(variant_type, variant_type_name)->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
}
template <typename ReturnType = void, typename DeserializeFunc>
@ -629,7 +628,7 @@ static void serializeTextImpl(
ReadBufferFromMemory buf(value.data, value.size);
auto variant_type = decodeDataType(buf);
auto tmp_variant_column = variant_type->createColumn();
auto variant_serialization = dynamic_column.getVariantSerialization(variant_type);
auto variant_serialization = variant_type->getDefaultSerialization();
variant_serialization->deserializeBinary(*tmp_variant_column, buf, settings);
nested_serialize(*variant_serialization, *tmp_variant_column, 0, ostr);
}

View File

@ -35,9 +35,10 @@ class RegionsNames
M(et, ru, 11) \
M(pt, en, 12) \
M(he, en, 13) \
M(vi, en, 14)
M(vi, en, 14) \
M(es, en, 15)
static constexpr size_t total_languages = 15;
static constexpr size_t total_languages = 16;
public:
enum class Language : size_t

View File

@ -8,6 +8,7 @@
#include <IO/ReadBufferFromString.h>
#include <Common/logger_useful.h>
#include <Common/Throttler.h>
#include <Common/Scheduler/ResourceGuard.h>
#include <base/sleep.h>
#include <Common/ProfileEvents.h>
#include <IO/SeekableReadBuffer.h>
@ -113,7 +114,9 @@ bool ReadBufferFromAzureBlobStorage::nextImpl()
{
try
{
ResourceGuard rlock(ResourceGuard::Metrics::getIORead(), read_settings.io_scheduling.read_resource_link, to_read_bytes);
bytes_read = data_stream->ReadToCount(reinterpret_cast<uint8_t *>(data_ptr), to_read_bytes);
rlock.unlock(bytes_read); // Do not hold resource under bandwidth throttler
if (read_settings.remote_throttler)
read_settings.remote_throttler->add(bytes_read, ProfileEvents::RemoteReadThrottlerBytes, ProfileEvents::RemoteReadThrottlerSleepMicroseconds);
break;

View File

@ -101,15 +101,13 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
{
try
{
ResourceGuard rlock(write_settings.resource_link, cost); // Note that zero-cost requests are ignored
ResourceGuard rlock(ResourceGuard::Metrics::getIOWrite(), write_settings.io_scheduling.write_resource_link, cost); // Note that zero-cost requests are ignored
func();
rlock.unlock(cost);
break;
}
catch (const Azure::Core::RequestFailedException & e)
{
if (cost)
write_settings.resource_link.accumulate(cost); // Accumulate resource for later use, because we have failed to consume it
if (i == num_tries - 1 || !isRetryableAzureException(e))
throw;
@ -117,8 +115,6 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
}
catch (...)
{
if (cost)
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
throw;
}
}

View File

@ -461,14 +461,17 @@ DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
}
template <class Settings>
static inline Settings updateResourceLink(const Settings & settings, const String & resource_name)
static inline Settings updateIOSchedulingSettings(const Settings & settings, const String & read_resource_name, const String & write_resource_name)
{
if (resource_name.empty())
if (read_resource_name.empty() && write_resource_name.empty())
return settings;
if (auto query_context = CurrentThread::getQueryContext())
{
Settings result(settings);
result.resource_link = query_context->getWorkloadClassifier()->get(resource_name);
if (!read_resource_name.empty())
result.io_scheduling.read_resource_link = query_context->getWorkloadClassifier()->get(read_resource_name);
if (!write_resource_name.empty())
result.io_scheduling.write_resource_link = query_context->getWorkloadClassifier()->get(write_resource_name);
return result;
}
return settings;
@ -500,7 +503,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
return object_storage->readObjects(
storage_objects,
updateResourceLink(settings, getReadResourceName()),
updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName()),
read_hint,
file_size);
}
@ -513,7 +516,7 @@ std::unique_ptr<WriteBufferFromFileBase> DiskObjectStorage::writeFile(
{
LOG_TEST(log, "Write file: {}", path);
WriteSettings write_settings = updateResourceLink(settings, getWriteResourceName());
WriteSettings write_settings = updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName());
auto transaction = createObjectStorageTransaction();
return transaction->writeFile(path, buf_size, mode, write_settings);
}

View File

@ -43,39 +43,21 @@ bool LocalObjectStorage::exists(const StoredObject & object) const
std::unique_ptr<ReadBufferFromFileBase> LocalObjectStorage::readObjects( /// NOLINT
const StoredObjects & objects,
const ReadSettings & read_settings,
std::optional<size_t> read_hint,
std::optional<size_t> file_size) const
std::optional<size_t>,
std::optional<size_t>) const
{
auto modified_settings = patchSettings(read_settings);
auto global_context = Context::getGlobalContextInstance();
auto read_buffer_creator =
[=] (bool /* restricted_seek */, const StoredObject & object)
-> std::unique_ptr<ReadBufferFromFileBase>
{
return createReadBufferFromFileBase(object.remote_path, modified_settings, read_hint, file_size);
};
auto read_buffer_creator = [=](bool /* restricted_seek */, const StoredObject & object) -> std::unique_ptr<ReadBufferFromFileBase>
{ return std::make_unique<ReadBufferFromFile>(object.remote_path); };
switch (read_settings.remote_fs_method)
{
case RemoteFSReadMethod::read:
{
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */false);
}
case RemoteFSReadMethod::threadpool:
{
auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */true);
auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
return std::make_unique<AsynchronousBoundedReadBuffer>(
std::move(impl), reader, read_settings,
global_context->getAsyncReadCounters(),
global_context->getFilesystemReadPrefetchesLog());
}
}
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator),
objects,
"file:",
modified_settings,
global_context->getFilesystemCacheLog(),
/* use_external_buffer */ false);
}
ReadSettings LocalObjectStorage::patchSettings(const ReadSettings & read_settings) const

View File

@ -1179,6 +1179,12 @@ public:
const FormatSettings & format_settings,
String & error) const override
{
if (element.isNull() && format_settings.null_as_default)
{
column.insertDefault();
return true;
}
auto & tuple = assert_cast<ColumnTuple &>(column);
size_t old_size = column.size();
bool were_valid_elements = false;
@ -1298,6 +1304,12 @@ public:
const FormatSettings & format_settings,
String & error) const override
{
if (element.isNull() && format_settings.null_as_default)
{
column.insertDefault();
return true;
}
if (!element.isObject())
{
error = fmt::format("cannot read Map value from JSON element: {}", jsonElementToString<JSONParser>(element, format_settings));
@ -1362,6 +1374,14 @@ public:
String & error) const override
{
auto & column_variant = assert_cast<ColumnVariant &>(column);
/// Check if element is NULL.
if (element.isNull())
{
column_variant.insertDefault();
return true;
}
for (size_t i : order)
{
auto & variant = column_variant.getVariantByGlobalDiscriminator(i);

View File

@ -284,12 +284,12 @@ void OrdinalDate::init(int64_t modified_julian_day)
bool OrdinalDate::tryInit(int64_t modified_julian_day)
{
/// This function supports day number from -678941 to 2973119 (which represent 0000-01-01 and 9999-12-31 respectively).
/// This function supports day number from -678941 to 2973483 (which represent 0000-01-01 and 9999-12-31 respectively).
if (modified_julian_day < -678941)
return false;
if (modified_julian_day > 2973119)
if (modified_julian_day > 2973483)
return false;
const auto a = modified_julian_day + 678575;

View File

@ -4,17 +4,21 @@
#if USE_ICU
#include <Columns/ColumnString.h>
#include <Functions/LowerUpperImpl.h>
#include <unicode/unistr.h>
#include <Common/StringUtils.h>
# include <Columns/ColumnString.h>
# include <Functions/LowerUpperImpl.h>
# include <unicode/ucasemap.h>
# include <unicode/unistr.h>
# include <unicode/urename.h>
# include <unicode/utypes.h>
# include <Common/StringUtils.h>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}
template <char not_case_lower_bound, char not_case_upper_bound, bool upper>
@ -27,7 +31,7 @@ struct LowerUpperUTF8Impl
ColumnString::Offsets & res_offsets,
size_t input_rows_count)
{
if (data.empty())
if (input_rows_count == 0)
return;
bool all_ascii = isAllASCII(data.data(), data.size());
@ -38,39 +42,56 @@ struct LowerUpperUTF8Impl
}
res_data.resize(data.size());
res_offsets.resize_exact(offsets.size());
res_offsets.resize_exact(input_rows_count);
UErrorCode error_code = U_ZERO_ERROR;
UCaseMap * case_map = ucasemap_open("", U_FOLD_CASE_DEFAULT, &error_code);
if (U_FAILURE(error_code))
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Error calling ucasemap_open: {}", u_errorName(error_code));
String output;
size_t curr_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
const auto * data_start = reinterpret_cast<const char *>(&data[offsets[i - 1]]);
size_t size = offsets[i] - offsets[i - 1];
const auto * src = reinterpret_cast<const char *>(&data[offsets[row_i - 1]]);
size_t src_size = offsets[row_i] - offsets[row_i - 1] - 1;
icu::UnicodeString input(data_start, static_cast<int32_t>(size), "UTF-8");
int32_t dst_size;
if constexpr (upper)
input.toUpper();
dst_size = ucasemap_utf8ToUpper(
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
else
input.toLower();
dst_size = ucasemap_utf8ToLower(
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
output.clear();
input.toUTF8String(output);
if (error_code == U_BUFFER_OVERFLOW_ERROR || error_code == U_STRING_NOT_TERMINATED_WARNING)
{
size_t new_size = curr_offset + dst_size + 1;
res_data.resize(new_size);
/// For valid UTF-8 input strings, ICU sometimes produces output with an extra '\0' at the end. Only the data before that
/// '\0' is valid. If the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this
/// case, the behavior is also reasonable.
size_t valid_size = output.size();
if (!output.empty() && output.back() == '\0')
--valid_size;
error_code = U_ZERO_ERROR;
if constexpr (upper)
dst_size = ucasemap_utf8ToUpper(
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
else
dst_size = ucasemap_utf8ToLower(
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
}
res_data.resize(curr_offset + valid_size + 1);
if (error_code != U_ZERO_ERROR)
throw DB::Exception(
ErrorCodes::LOGICAL_ERROR,
"Error calling {}: {} input: {} input_size: {}",
upper ? "ucasemap_utf8ToUpper" : "ucasemap_utf8ToLower",
u_errorName(error_code),
std::string_view(src, src_size),
src_size);
memcpy(&res_data[curr_offset], output.data(), valid_size);
res_data[curr_offset + valid_size] = 0;
curr_offset += valid_size + 1;
res_offsets[i] = curr_offset;
res_data[curr_offset + dst_size] = 0;
curr_offset += dst_size + 1;
res_offsets[row_i] = curr_offset;
}
res_data.resize(curr_offset);
}
static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)

View File

@ -406,7 +406,7 @@ void UserDefinedSQLObjectsZooKeeperStorage::syncObjects(const zkutil::ZooKeeperP
LOG_DEBUG(log, "Syncing user-defined {} objects", object_type);
Strings object_names = getObjectNamesAndSetWatch(zookeeper, object_type);
getLock();
auto lock = getLock();
/// Remove stale objects
removeAllObjectsExcept(object_names);

View File

@ -2,6 +2,7 @@
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeInterval.h>
#include <Formats/FormatSettings.h>
@ -43,6 +44,7 @@ public:
enum ResultType
{
Date,
Date32,
DateTime,
DateTime64,
};
@ -75,15 +77,15 @@ public:
bool second_argument_is_date = false;
auto check_second_argument = [&] {
if (!isDate(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
if (!isDateOrDate32(arguments[1].type) && !isDateTime(arguments[1].type) && !isDateTime64(arguments[1].type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2nd argument of function {}. "
"Should be a date or a date with time", arguments[1].type->getName(), getName());
second_argument_is_date = isDate(arguments[1].type);
second_argument_is_date = isDateOrDate32(arguments[1].type);
if (second_argument_is_date && ((datepart_kind == IntervalKind::Kind::Hour)
|| (datepart_kind == IntervalKind::Kind::Minute) || (datepart_kind == IntervalKind::Kind::Second)))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type Date of argument for function {}", getName());
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for function {}", arguments[1].type->getName(), getName());
};
auto check_timezone_argument = [&] {
@ -119,6 +121,8 @@ public:
if (result_type == ResultType::Date)
return std::make_shared<DataTypeDate>();
if (result_type == ResultType::Date32)
return std::make_shared<DataTypeDate32>();
else if (result_type == ResultType::DateTime)
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 1, false));
else

View File

@ -44,9 +44,9 @@ public:
auto check_first_argument = [&]
{
const DataTypePtr & type_arg1 = arguments[0].type;
if (!isDate(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
if (!isDateOrDate32(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of 1st argument of function {}, expected a Date, DateTime or DateTime64",
"Illegal type {} of 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64",
type_arg1->getName(), getName());
value_is_date = isDate(type_arg1);
};
@ -56,6 +56,7 @@ public:
enum class ResultType : uint8_t
{
Date,
Date32,
DateTime,
DateTime64
};
@ -128,6 +129,8 @@ public:
{
case ResultType::Date:
return std::make_shared<DataTypeDate>();
case ResultType::Date32:
return std::make_shared<DataTypeDate32>();
case ResultType::DateTime:
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
case ResultType::DateTime64:
@ -185,7 +188,13 @@ private:
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
}
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, DateTime or DateTime64", getName());
else if (isDate32(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
}
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
}
template <typename TimeDataType, typename TimeColumnType>

View File

@ -91,6 +91,8 @@ void ProgressValues::writeJSON(WriteBuffer & out) const
writeText(result_bytes, out);
writeCString("\",\"elapsed_ns\":\"", out);
writeText(elapsed_ns, out);
writeCString("\",\"real_time_microseconds\":\"", out);
writeText(real_time_microseconds, out);
writeCString("\"", out);
writeCString("}", out);
}
@ -110,6 +112,7 @@ bool Progress::incrementPiecewiseAtomically(const Progress & rhs)
result_bytes += rhs.result_bytes;
elapsed_ns += rhs.elapsed_ns;
real_time_microseconds += rhs.real_time_microseconds;
return rhs.read_rows || rhs.written_rows;
}
@ -129,6 +132,7 @@ void Progress::reset()
result_bytes = 0;
elapsed_ns = 0;
real_time_microseconds = 0;
}
ProgressValues Progress::getValues() const
@ -148,6 +152,7 @@ ProgressValues Progress::getValues() const
res.result_bytes = result_bytes.load(std::memory_order_relaxed);
res.elapsed_ns = elapsed_ns.load(std::memory_order_relaxed);
res.real_time_microseconds = real_time_microseconds.load(std::memory_order_relaxed);
return res;
}
@ -169,6 +174,7 @@ ProgressValues Progress::fetchValuesAndResetPiecewiseAtomically()
res.result_bytes = result_bytes.fetch_and(0);
res.elapsed_ns = elapsed_ns.fetch_and(0);
res.real_time_microseconds = real_time_microseconds.fetch_and(0);
return res;
}
@ -190,6 +196,7 @@ Progress Progress::fetchAndResetPiecewiseAtomically()
res.result_bytes = result_bytes.fetch_and(0);
res.elapsed_ns = elapsed_ns.fetch_and(0);
res.real_time_microseconds = real_time_microseconds.fetch_and(0);
return res;
}
@ -209,6 +216,7 @@ Progress & Progress::operator=(Progress && other) noexcept
result_bytes = other.result_bytes.load(std::memory_order_relaxed);
elapsed_ns = other.elapsed_ns.load(std::memory_order_relaxed);
real_time_microseconds = other.real_time_microseconds.load(std::memory_order_relaxed);
return *this;
}
@ -244,4 +252,9 @@ void Progress::incrementElapsedNs(UInt64 elapsed_ns_)
elapsed_ns.fetch_add(elapsed_ns_, std::memory_order_relaxed);
}
void Progress::incrementRealTimeMicroseconds(UInt64 microseconds)
{
real_time_microseconds.fetch_add(microseconds, std::memory_order_relaxed);
}
}

View File

@ -28,6 +28,7 @@ struct ProgressValues
UInt64 result_bytes = 0;
UInt64 elapsed_ns = 0;
UInt64 real_time_microseconds = 0;
void read(ReadBuffer & in, UInt64 server_revision);
void write(WriteBuffer & out, UInt64 client_revision) const;
@ -40,6 +41,7 @@ struct ReadProgress
UInt64 read_bytes = 0;
UInt64 total_rows_to_read = 0;
UInt64 total_bytes_to_read = 0;
UInt64 real_time_microseconds = 0;
ReadProgress(UInt64 read_rows_, UInt64 read_bytes_, UInt64 total_rows_to_read_ = 0, UInt64 total_bytes_to_read_ = 0)
: read_rows(read_rows_), read_bytes(read_bytes_), total_rows_to_read(total_rows_to_read_), total_bytes_to_read(total_bytes_to_read_) {}
@ -96,6 +98,8 @@ struct Progress
std::atomic<UInt64> elapsed_ns {0};
std::atomic<UInt64> real_time_microseconds {0};
Progress() = default;
Progress(UInt64 read_rows_, UInt64 read_bytes_, UInt64 total_rows_to_read_ = 0, UInt64 total_bytes_to_read_ = 0)
@ -125,6 +129,8 @@ struct Progress
void incrementElapsedNs(UInt64 elapsed_ns_);
void incrementRealTimeMicroseconds(UInt64 microseconds);
void reset();
ProgressValues getValues() const;

View File

@ -6,7 +6,6 @@
#include <IO/ReadBufferFromIStream.h>
#include <IO/ReadBufferFromS3.h>
#include <Common/Scheduler/ResourceGuard.h>
#include <IO/S3/getObjectInfo.h>
#include <IO/S3/Requests.h>
@ -423,22 +422,13 @@ Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t attempt, si
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::ReadBufferFromS3InitMicroseconds);
// We do not know in advance how many bytes we are going to consume, to avoid blocking estimated it from below
constexpr ResourceCost estimated_cost = 1;
ResourceGuard rlock(read_settings.resource_link, estimated_cost);
CurrentThread::IOScope io_scope(read_settings.io_scheduling);
Aws::S3::Model::GetObjectOutcome outcome = client_ptr->GetObject(req);
rlock.unlock();
if (outcome.IsSuccess())
{
ResourceCost bytes_read = outcome.GetResult().GetContentLength();
read_settings.resource_link.adjust(estimated_cost, bytes_read);
return outcome.GetResultWithOwnership();
}
else
{
read_settings.resource_link.accumulate(estimated_cost);
const auto & error = outcome.GetError();
throw S3Exception(error.GetMessage(), error.GetErrorType());
}

View File

@ -118,8 +118,7 @@ struct ReadSettings
ThrottlerPtr remote_throttler;
ThrottlerPtr local_throttler;
// Resource to be used during reading
ResourceLink resource_link;
IOSchedulingSettings io_scheduling;
size_t http_max_tries = 10;
size_t http_retry_initial_backoff_ms = 100;

View File

@ -443,6 +443,7 @@ std::unique_ptr<ReadBuffer> ReadWriteBufferFromHTTP::initialize()
}
response.getCookies(cookies);
response.getHeaders(response_headers);
content_encoding = response.get("Content-Encoding", "");
// Remember file size. It'll be used to report eof in next nextImpl() call.
@ -680,6 +681,19 @@ std::string ReadWriteBufferFromHTTP::getResponseCookie(const std::string & name,
return def;
}
Map ReadWriteBufferFromHTTP::getResponseHeaders() const
{
Map map;
for (const auto & header : response_headers)
{
Tuple elem;
elem.emplace_back(header.first);
elem.emplace_back(header.second);
map.emplace_back(elem);
}
return map;
}
void ReadWriteBufferFromHTTP::setNextCallback(NextCallback next_callback_)
{
next_callback = next_callback_;

View File

@ -90,6 +90,9 @@ private:
std::unique_ptr<ReadBuffer> impl;
std::vector<Poco::Net::HTTPCookie> cookies;
std::map<String, String> response_headers;
HTTPHeaderEntries http_header_entries;
std::function<void(size_t)> next_callback;
@ -187,6 +190,8 @@ public:
HTTPFileInfo getFileInfo();
static HTTPFileInfo parseFileInfo(const Poco::Net::HTTPResponse & response, size_t requested_range_begin);
Map getResponseHeaders() const;
};
using ReadWriteBufferFromHTTPPtr = std::unique_ptr<ReadWriteBufferFromHTTP>;

View File

@ -11,7 +11,6 @@
#include <Common/Throttler.h>
#include <Interpreters/Cache/FileCache.h>
#include <Common/Scheduler/ResourceGuard.h>
#include <IO/WriteHelpers.h>
#include <IO/S3Common.h>
#include <IO/S3/Requests.h>
@ -558,12 +557,11 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
auto & request = std::get<0>(*worker_data);
ResourceCost cost = request.GetContentLength();
ResourceGuard rlock(write_settings.resource_link, cost);
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
Stopwatch watch;
auto outcome = client_ptr->UploadPart(request);
watch.stop();
rlock.unlock(); // Avoid acquiring other locks under resource lock
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
@ -577,7 +575,6 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data)
if (!outcome.IsSuccess())
{
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
}
@ -715,12 +712,11 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
if (client_ptr->isClientForDisk())
ProfileEvents::increment(ProfileEvents::DiskS3PutObject);
ResourceCost cost = request.GetContentLength();
ResourceGuard rlock(write_settings.resource_link, cost);
CurrentThread::IOScope io_scope(write_settings.io_scheduling);
Stopwatch watch;
auto outcome = client_ptr->PutObject(request);
watch.stop();
rlock.unlock();
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds());
if (blob_log)
@ -734,7 +730,6 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data
}
ProfileEvents::increment(ProfileEvents::WriteBufferFromS3RequestsErrors, 1);
write_settings.resource_link.accumulate(cost); // We assume no resource was used in case of failure
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
{

View File

@ -13,8 +13,7 @@ struct WriteSettings
ThrottlerPtr remote_throttler;
ThrottlerPtr local_throttler;
// Resource to be used during reading
ResourceLink resource_link;
IOSchedulingSettings io_scheduling;
/// Filesystem cache settings
bool enable_filesystem_cache_on_write_operations = false;

View File

@ -396,7 +396,7 @@ const ActionsDAG::Node * ActionsDAG::tryFindInOutputs(const std::string & name)
return nullptr;
}
ActionsDAG::NodeRawConstPtrs ActionsDAG::findInOutpus(const Names & names) const
ActionsDAG::NodeRawConstPtrs ActionsDAG::findInOutputs(const Names & names) const
{
NodeRawConstPtrs required_nodes;
required_nodes.reserve(names.size());
@ -524,7 +524,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_
void ActionsDAG::removeUnusedActions(const Names & required_names, bool allow_remove_inputs, bool allow_constant_folding)
{
auto required_nodes = findInOutpus(required_names);
auto required_nodes = findInOutputs(required_names);
outputs.swap(required_nodes);
removeUnusedActions(allow_remove_inputs, allow_constant_folding);
}

View File

@ -156,7 +156,7 @@ public:
const Node * tryFindInOutputs(const std::string & name) const;
/// Same, but for the list of names.
NodeRawConstPtrs findInOutpus(const Names & names) const;
NodeRawConstPtrs findInOutputs(const Names & names) const;
/// Find first node with the same name in output nodes and replace it.
/// If was not found, add node to outputs end.
@ -436,7 +436,7 @@ public:
/// Returns a list of nodes representing atomic predicates.
static NodeRawConstPtrs extractConjunctionAtoms(const Node * predicate);
/// Get a list of nodes. For every node, check if it can be compused using allowed subset of inputs.
/// Get a list of nodes. For every node, check if it can be computed using allowed subset of inputs.
/// Returns only those nodes from the list which can be computed.
static NodeRawConstPtrs filterNodesByAllowedInputs(
NodeRawConstPtrs nodes,

View File

@ -59,6 +59,18 @@ class CompiledAggregateFunctionsHolder;
class NativeWriter;
struct OutputBlockColumns;
struct GroupingSetsParams
{
GroupingSetsParams() = default;
GroupingSetsParams(Names used_keys_, Names missing_keys_) : used_keys(std::move(used_keys_)), missing_keys(std::move(missing_keys_)) { }
Names used_keys;
Names missing_keys;
};
using GroupingSetsParamsList = std::vector<GroupingSetsParams>;
/** How are "total" values calculated with WITH TOTALS?
* (For more details, see TotalsHavingTransform.)
*

View File

@ -33,6 +33,8 @@
#include <Common/SensitiveDataMasker.h>
#include <Common/SipHash.h>
#include <Common/logger_useful.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTIdentifier.h>
namespace CurrentMetrics
{
@ -308,6 +310,7 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
/* no_squash */ false,
/* no_destination */ false,
/* async_insert */ false);
auto table = interpreter.getTable(insert_query);
auto sample_block = InterpreterInsertQuery::getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);
@ -318,6 +321,10 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
/// InterpreterInsertQuery::getTable() -> ITableFunction::execute().
if (insert_query.table_id)
query_context->checkAccess(AccessType::INSERT, insert_query.table_id, sample_block.getNames());
insert_query.columns = std::make_shared<ASTExpressionList>();
for (const auto & column : sample_block)
insert_query.columns->children.push_back(std::make_shared<ASTIdentifier>(column.name));
}
AsynchronousInsertQueue::PushResult
@ -389,6 +396,10 @@ AsynchronousInsertQueue::pushDataChunk(ASTPtr query, DataChunk chunk, ContextPtr
if (data_kind == DataKind::Preprocessed)
insert_query.format = "Native";
/// Query parameters make sense only for format Values.
if (insert_query.format == "Values")
entry->query_parameters = query_context->getQueryParameters();
InsertQuery key{query, query_context->getUserID(), query_context->getCurrentRoles(), settings, data_kind};
InsertDataPtr data_to_process;
std::future<void> insert_future;
@ -692,6 +703,17 @@ catch (...)
tryLogCurrentException("AsynchronousInsertQueue", "Failed to add elements to AsynchronousInsertLog");
}
void convertBlockToHeader(Block & block, const Block & header)
{
auto converting_dag = ActionsDAG::makeConvertingActions(
block.getColumnsWithTypeAndName(),
header.getColumnsWithTypeAndName(),
ActionsDAG::MatchColumnsMode::Name);
auto converting_actions = std::make_shared<ExpressionActions>(std::move(converting_dag));
converting_actions->execute(block);
}
String serializeQuery(const IAST & query, size_t max_length)
{
return query.hasSecretParts()
@ -787,6 +809,61 @@ try
if (async_insert_log)
log_elements.reserve(data->entries.size());
auto add_entry_to_asynchronous_insert_log = [&, query_by_format = NameToNameMap{}](
const InsertData::EntryPtr & entry,
const String & parsing_exception,
size_t num_rows,
size_t num_bytes) mutable
{
if (!async_insert_log)
return;
AsynchronousInsertLogElement elem;
elem.event_time = timeInSeconds(entry->create_time);
elem.event_time_microseconds = timeInMicroseconds(entry->create_time);
elem.database = query_database;
elem.table = query_table;
elem.format = entry->format;
elem.query_id = entry->query_id;
elem.bytes = num_bytes;
elem.rows = num_rows;
elem.exception = parsing_exception;
elem.data_kind = entry->chunk.getDataKind();
elem.timeout_milliseconds = data->timeout_ms.count();
elem.flush_query_id = insert_query_id;
auto get_query_by_format = [&](const String & format) -> const String &
{
auto [it, inserted] = query_by_format.try_emplace(format);
if (!inserted)
return it->second;
auto query = key.query->clone();
assert_cast<ASTInsertQuery &>(*query).format = format;
it->second = serializeQuery(*query, insert_context->getSettingsRef().log_queries_cut_to_length);
return it->second;
};
if (entry->chunk.getDataKind() == DataKind::Parsed)
elem.query_for_logging = key.query_str;
else
elem.query_for_logging = get_query_by_format(entry->format);
/// If there was a parsing error,
/// the entry won't be flushed anyway,
/// so add the log element immediately.
if (!elem.exception.empty())
{
elem.status = AsynchronousInsertLogElement::ParsingError;
async_insert_log->add(std::move(elem));
}
else
{
elem.status = AsynchronousInsertLogElement::Ok;
log_elements.push_back(std::move(elem));
}
};
try
{
interpreter = std::make_unique<InterpreterInsertQuery>(
@ -815,49 +892,20 @@ try
catch (...)
{
logExceptionBeforeStart(query_for_logging, insert_context, key.query, query_span, start_watch.elapsedMilliseconds());
if (async_insert_log)
{
for (const auto & entry : data->entries)
add_entry_to_asynchronous_insert_log(entry, /*parsing_exception=*/ "", /*num_rows=*/ 0, entry->chunk.byteSize());
auto exception = getCurrentExceptionMessage(false);
auto flush_time = std::chrono::system_clock::now();
appendElementsToLogSafe(*async_insert_log, std::move(log_elements), flush_time, exception);
}
throw;
}
auto add_entry_to_asynchronous_insert_log = [&](const auto & entry,
const auto & entry_query_for_logging,
const auto & exception,
size_t num_rows,
size_t num_bytes,
Milliseconds timeout_ms)
{
if (!async_insert_log)
return;
AsynchronousInsertLogElement elem;
elem.event_time = timeInSeconds(entry->create_time);
elem.event_time_microseconds = timeInMicroseconds(entry->create_time);
elem.query_for_logging = entry_query_for_logging;
elem.database = query_database;
elem.table = query_table;
elem.format = entry->format;
elem.query_id = entry->query_id;
elem.bytes = num_bytes;
elem.rows = num_rows;
elem.exception = exception;
elem.data_kind = entry->chunk.getDataKind();
elem.timeout_milliseconds = timeout_ms.count();
elem.flush_query_id = insert_query_id;
/// If there was a parsing error,
/// the entry won't be flushed anyway,
/// so add the log element immediately.
if (!elem.exception.empty())
{
elem.status = AsynchronousInsertLogElement::ParsingError;
async_insert_log->add(std::move(elem));
}
else
{
log_elements.push_back(elem);
}
};
auto finish_entries = [&]
auto finish_entries = [&](size_t num_rows, size_t num_bytes)
{
for (const auto & entry : data->entries)
{
@ -870,20 +918,7 @@ try
auto flush_time = std::chrono::system_clock::now();
appendElementsToLogSafe(*async_insert_log, std::move(log_elements), flush_time, "");
}
};
Chunk chunk;
auto header = pipeline.getHeader();
if (key.data_kind == DataKind::Parsed)
chunk = processEntriesWithParsing(key, data, header, insert_context, log, add_entry_to_asynchronous_insert_log);
else
chunk = processPreprocessedEntries(key, data, header, insert_context, add_entry_to_asynchronous_insert_log);
ProfileEvents::increment(ProfileEvents::AsyncInsertRows, chunk.getNumRows());
auto log_and_add_finish_to_query_log = [&](size_t num_rows, size_t num_bytes)
{
LOG_DEBUG(log, "Flushed {} rows, {} bytes for query '{}'", num_rows, num_bytes, key.query_str);
queue_shard_flush_time_history.updateWithCurrentTime();
@ -892,16 +927,24 @@ try
query_log_elem, insert_context, key.query, pipeline, pulling_pipeline, query_span, QueryCache::Usage::None, internal);
};
if (chunk.getNumRows() == 0)
{
finish_entries();
log_and_add_finish_to_query_log(0, 0);
return;
}
try
{
Chunk chunk;
auto header = pipeline.getHeader();
if (key.data_kind == DataKind::Parsed)
chunk = processEntriesWithParsing(key, data, header, insert_context, log, add_entry_to_asynchronous_insert_log);
else
chunk = processPreprocessedEntries(data, header, add_entry_to_asynchronous_insert_log);
ProfileEvents::increment(ProfileEvents::AsyncInsertRows, chunk.getNumRows());
if (chunk.getNumRows() == 0)
{
finish_entries(/*num_rows=*/ 0, /*num_bytes=*/ 0);
return;
}
size_t num_rows = chunk.getNumRows();
size_t num_bytes = chunk.bytes();
@ -911,7 +954,7 @@ try
CompletedPipelineExecutor completed_executor(pipeline);
completed_executor.execute();
log_and_add_finish_to_query_log(num_rows, num_bytes);
finish_entries(num_rows, num_bytes);
}
catch (...)
{
@ -925,8 +968,6 @@ try
}
throw;
}
finish_entries();
}
catch (const Exception & e)
{
@ -987,7 +1028,6 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
StreamingFormatExecutor executor(header, format, std::move(on_error), std::move(adding_defaults_transform));
auto chunk_info = std::make_shared<AsyncInsertInfo>();
auto query_for_logging = serializeQuery(*key.query, insert_context->getSettingsRef().log_queries_cut_to_length);
for (const auto & entry : data->entries)
{
@ -999,16 +1039,23 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
"Expected entry with data kind Parsed. Got: {}", entry->chunk.getDataKind());
auto buffer = std::make_unique<ReadBufferFromString>(*bytes);
executor.setQueryParameters(entry->query_parameters);
size_t num_bytes = bytes->size();
size_t num_rows = executor.execute(*buffer);
total_rows += num_rows;
chunk_info->offsets.push_back(total_rows);
chunk_info->tokens.push_back(entry->async_dedup_token);
add_to_async_insert_log(entry, query_for_logging, current_exception, num_rows, num_bytes, data->timeout_ms);
/// For some reason, client can pass zero rows and bytes to server.
/// We don't update offsets in this case, because we assume every insert has some rows during dedup
/// but we have nothing to deduplicate for this insert.
if (num_rows > 0)
{
chunk_info->offsets.push_back(total_rows);
chunk_info->tokens.push_back(entry->async_dedup_token);
}
add_to_async_insert_log(entry, current_exception, num_rows, num_bytes);
current_exception.clear();
entry->resetChunk();
}
@ -1020,30 +1067,14 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
template <typename LogFunc>
Chunk AsynchronousInsertQueue::processPreprocessedEntries(
const InsertQuery & key,
const InsertDataPtr & data,
const Block & header,
const ContextPtr & insert_context,
LogFunc && add_to_async_insert_log)
{
size_t total_rows = 0;
auto chunk_info = std::make_shared<AsyncInsertInfo>();
auto result_columns = header.cloneEmptyColumns();
std::unordered_map<String, String> format_to_query;
auto get_query_by_format = [&](const String & format) -> const String &
{
auto [it, inserted] = format_to_query.try_emplace(format);
if (!inserted)
return it->second;
auto query = key.query->clone();
assert_cast<ASTInsertQuery &>(*query).format = format;
it->second = serializeQuery(*query, insert_context->getSettingsRef().log_queries_cut_to_length);
return it->second;
};
for (const auto & entry : data->entries)
{
const auto * block = entry->chunk.asBlock();
@ -1051,17 +1082,26 @@ Chunk AsynchronousInsertQueue::processPreprocessedEntries(
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expected entry with data kind Preprocessed. Got: {}", entry->chunk.getDataKind());
auto columns = block->getColumns();
Block block_to_insert = *block;
if (!isCompatibleHeader(block_to_insert, header))
convertBlockToHeader(block_to_insert, header);
auto columns = block_to_insert.getColumns();
for (size_t i = 0, s = columns.size(); i < s; ++i)
result_columns[i]->insertRangeFrom(*columns[i], 0, columns[i]->size());
total_rows += block->rows();
chunk_info->offsets.push_back(total_rows);
chunk_info->tokens.push_back(entry->async_dedup_token);
total_rows += block_to_insert.rows();
const auto & query_for_logging = get_query_by_format(entry->format);
add_to_async_insert_log(entry, query_for_logging, "", block->rows(), block->bytes(), data->timeout_ms);
/// For some reason, client can pass zero rows and bytes to server.
/// We don't update offsets in this case, because we assume every insert has some rows during dedup,
/// but we have nothing to deduplicate for this insert.
if (block_to_insert.rows() > 0)
{
chunk_info->offsets.push_back(total_rows);
chunk_info->tokens.push_back(entry->async_dedup_token);
}
add_to_async_insert_log(entry, /*parsing_exception=*/ "", block_to_insert.rows(), block_to_insert.bytes());
entry->resetChunk();
}

View File

@ -147,6 +147,7 @@ private:
const String format;
MemoryTracker * const user_memory_tracker;
const std::chrono::time_point<std::chrono::system_clock> create_time;
NameToNameMap query_parameters;
Entry(
DataChunk && chunk_,
@ -287,10 +288,8 @@ private:
template <typename LogFunc>
static Chunk processPreprocessedEntries(
const InsertQuery & key,
const InsertDataPtr & data,
const Block & header,
const ContextPtr & insert_context,
LogFunc && add_to_async_insert_log);
template <typename E>

View File

@ -893,6 +893,12 @@ ContextData::ContextData(const ContextData &o) :
{
}
void ContextData::resetSharedContext()
{
std::lock_guard<std::mutex> lock(mutex_shared_context);
shared = nullptr;
}
Context::Context() = default;
Context::Context(const Context & rhs) : ContextData(rhs), std::enable_shared_from_this<Context>(rhs) {}
@ -914,14 +920,6 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
return res;
}
void Context::initGlobal()
{
assert(!global_context_instance);
global_context_instance = shared_from_this();
DatabaseCatalog::init(shared_from_this());
EventNotifier::init();
}
SharedContextHolder Context::createShared()
{
return SharedContextHolder(std::make_unique<ContextSharedPart>());
@ -2692,7 +2690,11 @@ void Context::makeSessionContext()
void Context::makeGlobalContext()
{
initGlobal();
assert(!global_context_instance);
global_context_instance = shared_from_this();
DatabaseCatalog::init(shared_from_this());
EventNotifier::init();
global_context = shared_from_this();
}
@ -4088,8 +4090,13 @@ void Context::initializeTraceCollector()
}
/// Call after unexpected crash happen.
void Context::handleCrash() const TSA_NO_THREAD_SAFETY_ANALYSIS
void Context::handleCrash() const
{
std::lock_guard<std::mutex> lock(mutex_shared_context);
if (!shared)
return;
SharedLockGuard lock2(shared->mutex);
if (shared->system_logs)
shared->system_logs->handleCrash();
}

View File

@ -492,6 +492,8 @@ public:
KitchenSink kitchen_sink;
void resetSharedContext();
protected:
using SampleBlockCache = std::unordered_map<std::string, Block>;
mutable SampleBlockCache sample_block_cache;
@ -529,6 +531,10 @@ protected:
mutable ThrottlerPtr local_write_query_throttler; /// A query-wide throttler for local IO writes
mutable ThrottlerPtr backups_query_throttler; /// A query-wide throttler for BACKUPs
mutable std::mutex mutex_shared_context; /// mutex to avoid accessing destroyed shared context pointer
/// Some Context methods can be called after the shared context is destroyed,
/// for example, Context::handleCrash(), which is called from a signal handler.
};
/** A set of known objects that can be used in the query.
@ -1387,8 +1393,6 @@ private:
ExternalUserDefinedExecutableFunctionsLoader & getExternalUserDefinedExecutableFunctionsLoaderWithLock(const std::lock_guard<std::mutex> & lock);
void initGlobal();
void setUserID(const UUID & user_id_);
void setCurrentRolesImpl(const std::vector<UUID> & new_current_roles, bool throw_if_not_granted, bool skip_if_not_granted, const std::shared_ptr<const User> & user);

View File

@ -821,6 +821,19 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
{
properties.indices = as_storage_metadata->getSecondaryIndices();
properties.projections = as_storage_metadata->getProjections().clone();
/// CREATE TABLE AS should copy PRIMARY KEY, ORDER BY, and similar clauses.
if (!create.storage->primary_key && as_storage_metadata->isPrimaryKeyDefined() && as_storage_metadata->hasPrimaryKey())
create.storage->set(create.storage->primary_key, as_storage_metadata->getPrimaryKeyAST()->clone());
if (!create.storage->partition_by && as_storage_metadata->isPartitionKeyDefined() && as_storage_metadata->hasPartitionKey())
create.storage->set(create.storage->partition_by, as_storage_metadata->getPartitionKeyAST()->clone());
if (!create.storage->order_by && as_storage_metadata->isSortingKeyDefined() && as_storage_metadata->hasSortingKey())
create.storage->set(create.storage->order_by, as_storage_metadata->getSortingKeyAST()->clone());
if (!create.storage->sample_by && as_storage_metadata->isSamplingKeyDefined() && as_storage_metadata->hasSamplingKey())
create.storage->set(create.storage->sample_by, as_storage_metadata->getSamplingKeyAST()->clone());
}
else
{

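A hypothetical example of the clause-copying above (table and key expressions are made up for illustration): when a storage clause is given without explicit keys, CREATE TABLE ... AS now carries over the source table's PRIMARY KEY, ORDER BY, PARTITION BY and SAMPLE BY:

CREATE TABLE src (d Date, x UInt64)
ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY x;

CREATE TABLE dst ENGINE = MergeTree AS src;
-- dst inherits PARTITION BY toYYYYMM(d) and ORDER BY x from src.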
View File

@ -107,7 +107,9 @@ BlockIO InterpreterDeleteQuery::execute()
String alter_query =
"ALTER TABLE " + table->getStorageID().getFullTableName()
+ (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster))
+ " UPDATE `_row_exists` = 0 WHERE " + serializeAST(*delete_query.predicate);
+ " UPDATE `_row_exists` = 0"
+ (delete_query.partition ? " IN PARTITION " + serializeAST(*delete_query.partition) : "")
+ " WHERE " + serializeAST(*delete_query.predicate);
ParserAlterQuery parser;
ASTPtr alter_ast = parseQuery(

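For illustration only (table, partition value and predicate are hypothetical): the interpreter change above rewrites a lightweight DELETE with a partition clause into the corresponding ALTER ... UPDATE restricted to that partition, roughly:

DELETE FROM db.t IN PARTITION '2024-09' WHERE x = 1;
-- is executed internally as approximately:
ALTER TABLE db.t UPDATE `_row_exists` = 0 IN PARTITION '2024-09' WHERE x = 1;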
Some files were not shown because too many files have changed in this diff.