Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 01:25:21 +00:00.

Commit 32153d6e0d: Merge branch 'master' into pufit/correct-error-for-alter-modify-definer
.gitmodules (vendored): 2 changed lines
@@ -341,7 +341,7 @@
 url = https://github.com/graphitemaster/incbin.git
 [submodule "contrib/usearch"]
 path = contrib/usearch
-url = https://github.com/unum-cloud/usearch.git
+url = https://github.com/ClickHouse/usearch.git
 [submodule "contrib/SimSIMD"]
 path = contrib/SimSIMD
 url = https://github.com/ashvardanian/SimSIMD.git
Poco ErrorHandler header:

@@ -21,6 +21,7 @@
 #include "Poco/Exception.h"
 #include "Poco/Foundation.h"
 #include "Poco/Mutex.h"
+#include "Poco/Message.h"


 namespace Poco
@@ -78,6 +79,10 @@ public:
     ///
     /// The default implementation just breaks into the debugger.

+    virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
+    /// Write a messages to the log
+    /// Useful for logging from Poco
+
 static void handle(const Exception & exc);
     /// Invokes the currently registered ErrorHandler.

@@ -87,6 +92,9 @@ public:
 static void handle();
     /// Invokes the currently registered ErrorHandler.

+static void logMessage(Message::Priority priority, const std::string & msg);
+    /// Invokes the currently registered ErrorHandler to log a message.
+
 static ErrorHandler * set(ErrorHandler * pHandler);
     /// Registers the given handler as the current error handler.
     ///
Poco ErrorHandler implementation:

@@ -8,7 +8,7 @@
 // Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
 // and Contributors.
 //
 // SPDX-License-Identifier: BSL-1.0
 //

@@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()

 void ErrorHandler::exception(const Exception& exc)
 {
     poco_debugger_msg(exc.what());
 }

 void ErrorHandler::exception(const std::exception& exc)
 {
     poco_debugger_msg(exc.what());
 }

 void ErrorHandler::exception()
 {
     poco_debugger_msg("unknown exception");
 }

 void ErrorHandler::handle(const Exception& exc)
 {
     FastMutex::ScopedLock lock(_mutex);
     try
     {
         _pHandler->exception(exc);
     }
     catch (...)
     {
     }
 }

 void ErrorHandler::handle(const std::exception& exc)
 {
     FastMutex::ScopedLock lock(_mutex);
     try
     {
         _pHandler->exception(exc);
     }
     catch (...)
     {
     }
 }

 void ErrorHandler::handle()
 {
     FastMutex::ScopedLock lock(_mutex);
     try
     {
         _pHandler->exception();
     }
     catch (...)
     {
     }
 }

+void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
+{
+    FastMutex::ScopedLock lock(_mutex);
+    try
+    {
+        _pHandler->logMessageImpl(priority, msg);
+    }
+    catch (...)
+    {
+    }
+}

 ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
 {
     poco_check_ptr(pHandler);

     FastMutex::ScopedLock lock(_mutex);
     ErrorHandler* pOld = _pHandler;
     _pHandler = pHandler;
     return pOld;
 }

 ErrorHandler* ErrorHandler::defaultHandler()
 {
     // NOTE: Since this is called to initialize the static _pHandler
     // variable, sh has to be a local static, otherwise we run
     // into static initialization order issues.
     static SingletonHolder<ErrorHandler> sh;
     return sh.get();
 }
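The new logMessage()/logMessageImpl() pair turns ErrorHandler into a minimal logging hook in addition to an exception hook. A rough usage sketch follows; the StderrErrorHandler class is illustrative only (not part of this commit) and relies just on the interface shown above:

```cpp
#include <iostream>
#include <string>
#include "Poco/ErrorHandler.h"
#include "Poco/Message.h"

/// Illustrative handler: forward Poco exceptions and log messages to stderr.
class StderrErrorHandler : public Poco::ErrorHandler
{
public:
    void exception(const Poco::Exception & exc) override
    {
        std::cerr << "Poco exception: " << exc.displayText() << '\n';
    }

    void logMessageImpl(Poco::Message::Priority priority, const std::string & msg) override
    {
        std::cerr << "Poco message (priority " << static_cast<int>(priority) << "): " << msg << '\n';
    }
};

int main()
{
    static StderrErrorHandler handler;    // must outlive all callers
    Poco::ErrorHandler::set(&handler);    // register as the process-wide handler
    // Anything built on Poco can now report without throwing:
    Poco::ErrorHandler::logMessage(Poco::Message::PRIO_WARNING, "example message");
}
```

TCPServer and TCPServerDispatcher below use this static entry point to report filtered and refused connections.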
@@ -17,6 +17,7 @@
 #include "Poco/Net/StreamSocketImpl.h"
 #include "Poco/NumberFormatter.h"
 #include "Poco/Timestamp.h"
+#include "Poco/ErrorHandler.h"
 #include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>
Poco Net TCPServer implementation:

@@ -8,7 +8,7 @@
 // Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
 // and Contributors.
 //
 // SPDX-License-Identifier: BSL-1.0
 //

@@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()

 TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
     _socket(ServerSocket(portNumber)),
     _thread(threadName(_socket)),
     _stopped(true)
 {
     Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
     if (pParams)
     {
         int toAdd = pParams->getMaxThreads() - pool.capacity();
         if (toAdd > 0) pool.addCapacity(toAdd);
     }
     _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
 }

 TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
     _socket(socket),
     _thread(threadName(socket)),
     _stopped(true)
 {
     Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
     if (pParams)
     {
         int toAdd = pParams->getMaxThreads() - pool.capacity();
         if (toAdd > 0) pool.addCapacity(toAdd);
     }
     _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
 }

 TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
     _socket(socket),
     _pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
     _thread(threadName(socket)),
     _stopped(true)
 {
 }

 TCPServer::~TCPServer()
 {
     try
     {
         stop();
         _pDispatcher->release();
     }
     catch (...)
     {
         poco_unexpected();
     }
 }

 const TCPServerParams& TCPServer::params() const
 {
     return _pDispatcher->params();
 }

 void TCPServer::start()
 {
     poco_assert (_stopped);

     _stopped = false;
     _thread.start(*this);
 }

 void TCPServer::stop()
 {
     if (!_stopped)
     {
         _stopped = true;
         _thread.join();
         _pDispatcher->stop();
     }
 }

 void TCPServer::run()
 {
     while (!_stopped)
     {
         Poco::Timespan timeout(250000);
         try
         {
             if (_socket.poll(timeout, Socket::SELECT_READ))
             {
                 try
                 {
                     StreamSocket ss = _socket.acceptConnection();

                     if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
                     {
                         // enable nodelay per default: OSX really needs that
 #if defined(POCO_OS_FAMILY_UNIX)
                         if (ss.address().family() != AddressFamily::UNIX_LOCAL)
 #endif
                         {
                             ss.setNoDelay(true);
                         }
                         _pDispatcher->enqueue(ss);
                     }
+                    else
+                    {
+                        ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
+                    }
                 }
                 catch (Poco::Exception& exc)
                 {
                     ErrorHandler::handle(exc);
                 }
                 catch (std::exception& exc)
                 {
                     ErrorHandler::handle(exc);
                 }
                 catch (...)
                 {
                     ErrorHandler::handle();
                 }
             }
         }
         catch (Poco::Exception& exc)
         {
             ErrorHandler::handle(exc);
             // possibly a resource issue since poll() failed;
             // give some time to recover before trying again
             Poco::Thread::sleep(50);
         }
     }
 }

 int TCPServer::currentThreads() const
 {
     return _pDispatcher->currentThreads();
 }

 int TCPServer::maxThreads() const
 {
     return _pDispatcher->maxThreads();
 }

 int TCPServer::totalConnections() const
 {
     return _pDispatcher->totalConnections();
 }

 int TCPServer::currentConnections() const
 {
     return _pDispatcher->currentConnections();
 }

 int TCPServer::maxConcurrentConnections() const
 {
     return _pDispatcher->maxConcurrentConnections();
 }

 int TCPServer::queuedConnections() const
 {
     return _pDispatcher->queuedConnections();
 }

 int TCPServer::refusedConnections() const
 {
     return _pDispatcher->refusedConnections();
 }

 void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
 {
     poco_assert (_stopped);

     _pConnectionFilter = pConnectionFilter;
 }

 std::string TCPServer::threadName(const ServerSocket& socket)
 {
     std::string name("TCPServer: ");
     name.append(socket.address().toString());
     return name;
 }
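The only functional change in run() is the added else branch: a socket rejected by the connection filter is now reported through ErrorHandler::logMessage instead of being dropped silently. A sketch of a filter that would exercise that branch; LoopbackOnlyFilter is a hypothetical example and assumes the stock Poco TCPServerConnectionFilter interface:

```cpp
#include "Poco/Net/ServerSocket.h"
#include "Poco/Net/StreamSocket.h"
#include "Poco/Net/TCPServer.h"
#include "Poco/Net/TCPServerConnectionFilter.h"

/// Hypothetical filter: accept only connections coming from the loopback interface.
/// Any rejected socket triggers the new "Filtered out connection from ..." log line in TCPServer::run().
class LoopbackOnlyFilter : public Poco::Net::TCPServerConnectionFilter
{
public:
    bool accept(const Poco::Net::StreamSocket & socket) override
    {
        return socket.peerAddress().host().isLoopback();
    }
};

// Usage sketch: the filter has to be installed while the server is stopped
// (TCPServer::setConnectionFilter asserts _stopped).
//
//     Poco::Net::TCPServer server(pFactory, Poco::Net::ServerSocket(9000));
//     server.setConnectionFilter(new LoopbackOnlyFilter);
//     server.start();
```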
Poco Net TCPServerDispatcher implementation:

@@ -8,7 +8,7 @@
 // Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
 // and Contributors.
 //
 // SPDX-License-Identifier: BSL-1.0
 //

@@ -33,44 +33,44 @@ namespace Net {

 class TCPConnectionNotification: public Notification
 {
 public:
     TCPConnectionNotification(const StreamSocket& socket):
         _socket(socket)
     {
     }

     ~TCPConnectionNotification()
     {
     }

     const StreamSocket& socket() const
     {
         return _socket;
     }

 private:
     StreamSocket _socket;
 };

 TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
     _rc(1),
     _pParams(pParams),
     _currentThreads(0),
     _totalConnections(0),
     _currentConnections(0),
     _maxConcurrentConnections(0),
     _refusedConnections(0),
     _stopped(false),
     _pConnectionFactory(pFactory),
     _threadPool(threadPool)
 {
     poco_check_ptr (pFactory);

     if (!_pParams)
         _pParams = new TCPServerParams;

     if (_pParams->getMaxThreads() == 0)
         _pParams->setMaxThreads(threadPool.capacity());
 }
@@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()

 void TCPServerDispatcher::duplicate()
 {
     ++_rc;
 }

 void TCPServerDispatcher::release()
 {
     if (--_rc == 0) delete this;
 }

 void TCPServerDispatcher::run()
 {
     AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive

     int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();

     for (;;)
     {
         try
         {
             AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
             if (pNf && !_stopped)
             {
                 TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
                 if (pCNf)
                 {
                     beginConnection();
                     if (!_stopped)
                     {
                         std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
                         poco_check_ptr(pConnection.get());
                         pConnection->start();
                     }
                     /// endConnection() should be called after destroying TCPServerConnection,
                     /// otherwise currentConnections() could become zero while some connections are yet still alive.
                     endConnection();
                 }
             }
         }
         catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
         catch (std::exception &exc) { ErrorHandler::handle(exc); }
         catch (...) { ErrorHandler::handle(); }
         FastMutex::ScopedLock lock(_mutex);
         if (_stopped || (_currentThreads > 1 && _queue.empty()))
         {
             --_currentThreads;
             break;
         }
     }
 }

 namespace
 {
     static const std::string threadName("TCPServerConnection");
 }

 void TCPServerDispatcher::enqueue(const StreamSocket& socket)
 {
     FastMutex::ScopedLock lock(_mutex);

+    ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
+        ", current threads: " + std::to_string(_currentThreads) +
+        ", threads in pool: " + std::to_string(_threadPool.allocated()) +
+        ", current connections: " + std::to_string(_currentConnections));
+
     if (_queue.size() < _pParams->getMaxQueued())
     {
+        /// NOTE: the condition below is wrong.
+        /// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
+        /// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
+        /// As a result, queue is useless and connections never wait in queue.
+        /// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
+        /// and the connection get rejected.
+        /// We could check _currentThreads < _threadPool.allocated() to make it work,
+        /// but it's not clear if we want to make it work
+        /// because it may be better to reject connection immediately if we don't have resources to handle it.
         if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
         {
             try
             {
                 this->duplicate();
                 _threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
                 ++_currentThreads;
             }
             catch (Poco::Exception& exc)
             {
+                ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
+                    socket.peerAddress().toString());
+                ErrorHandler::handle(exc);
                 this->release();
                 ++_refusedConnections;
-                std::cerr << "Got exception while starting thread for connection. Error code: "
-                    << exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
                 return;
             }
         }
+        else if (!_queue.hasIdleThreads())
+        {
+            ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
+                socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
+        }
         _queue.enqueueNotification(new TCPConnectionNotification(socket));
     }
     else
     {
+        ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
+            ", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
         ++_refusedConnections;
     }
 }

 void TCPServerDispatcher::stop()
 {
     _stopped = true;
     _queue.clear();
     _queue.wakeUpAll();
 }

 int TCPServerDispatcher::currentThreads() const
 {
     return _currentThreads;
 }

 int TCPServerDispatcher::maxThreads() const
 {
     FastMutex::ScopedLock lock(_mutex);

     return _threadPool.capacity();
 }

 int TCPServerDispatcher::totalConnections() const
 {
     return _totalConnections;
 }

 int TCPServerDispatcher::currentConnections() const
 {
     return _currentConnections;
 }

 int TCPServerDispatcher::maxConcurrentConnections() const
 {
     return _maxConcurrentConnections;
 }

 int TCPServerDispatcher::queuedConnections() const
 {
     return _queue.size();
 }

 int TCPServerDispatcher::refusedConnections() const
 {
     return _refusedConnections;
 }

 void TCPServerDispatcher::beginConnection()
 {
     FastMutex::ScopedLock lock(_mutex);

     ++_totalConnections;
     ++_currentConnections;
     if (_currentConnections > _maxConcurrentConnections)
         _maxConcurrentConnections.store(_currentConnections);
 }

 void TCPServerDispatcher::endConnection()
 {
     --_currentConnections;
 }
contrib/qpl (vendored): 2 changed lines

@@ -1 +1 @@
-Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
+Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8
QPL CMake build rules:

@@ -4,7 +4,6 @@ set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
 set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
 set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
 set (EFFICIENT_WAIT OFF)
-set (BLOCK_ON_FAULT ON)
 set (LOG_HW_INIT OFF)
 set (SANITIZE_MEMORY OFF)
 set (SANITIZE_THREADS OFF)
@@ -16,16 +15,20 @@ function(GetLibraryVersion _content _outputVar)
     SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
 endfunction()

-set (QPL_VERSION 1.2.0)
+set (QPL_VERSION 1.6.0)

 message(STATUS "Intel QPL version: ${QPL_VERSION}")

-# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api.
-# Generate 8 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, core_iaa, middle_layer_lib.
+# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal.
+# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm,
+# which are then combined into static or shared qpl.
 # Output ch_contrib::qpl by linking with 8 library targets.

-# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
-# only upstream isal (ch_contrib::isal) but at this point we can't.
+# Note, QPL has integrated a customized version of ISA-L to meet specific needs.
+# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version
+# or upgrade the current copy.

 ## cmake/CompileOptions.cmake and automatic wrappers generation

 # ==========================================================================
 # Copyright (C) 2022 Intel Corporation
@@ -442,6 +445,7 @@ function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
     endforeach()
 endfunction()

 # [SUBDIR]isal

 enable_language(ASM_NASM)

@@ -479,7 +483,6 @@ set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm
     ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm
     ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm
     ${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm
     ${QPL_SRC_DIR}/isal/igzip/stdmac.asm
     ${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm
     ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm
     ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm
@@ -505,7 +508,6 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
 # Setting external and internal interfaces for ISA-L library
 target_include_directories(isal
     PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/isal/include>
-    PRIVATE ${QPL_SRC_DIR}/isal/include
     PUBLIC ${QPL_SRC_DIR}/isal/igzip)

 set_target_properties(isal PROPERTIES
@@ -617,12 +619,9 @@ target_compile_options(qplcore_sw_dispatcher

 # [SUBDIR]core-iaa
 file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
-    ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp
     ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c
-    ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp
     ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c
-    ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp
-    ${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c)
+    ${QPL_SRC_DIR}/core-iaa/sources/*.c)

 # Create library
 add_library(core_iaa OBJECT ${HW_PATH_SRC})
@@ -634,31 +633,27 @@ target_include_directories(core_iaa
     PRIVATE ${UUID_DIR}
     PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
     PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
     PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
-    PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
+    PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES> # for own_checkers.h
     PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)

 target_compile_features(core_iaa PRIVATE c_std_11)

 target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
-    PRIVATE $<$<BOOL:${BLOCK_ON_FAULT}>: BLOCK_ON_FAULT_ENABLED>
     PRIVATE $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
     PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)

 # [SUBDIR]middle-layer
 file(GLOB MIDDLE_LAYER_SRC
-    ${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
-    ${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp
-    ${QPL_SRC_DIR}/middle-layer/checksum/*.cpp
+    ${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp
+    ${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
     ${QPL_SRC_DIR}/middle-layer/common/*.cpp
     ${QPL_SRC_DIR}/middle-layer/compression/*.cpp
     ${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp
     ${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp
     ${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp
     ${QPL_SRC_DIR}/middle-layer/other/*.cpp
-    ${QPL_SRC_DIR}/middle-layer/util/*.cpp
-    ${QPL_SRC_DIR}/middle-layer/inflate/*.cpp
-    ${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo
+    ${QPL_SRC_DIR}/middle-layer/util/*.cpp)

 add_library(middle_layer_lib OBJECT
     ${MIDDLE_LAYER_SRC})
@@ -667,6 +662,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
     $<TARGET_OBJECTS:middle_layer_lib>)

 target_compile_options(middle_layer_lib
     PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
     PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})

 target_compile_definitions(middle_layer_lib
@@ -682,6 +678,7 @@ target_include_directories(middle_layer_lib
     PRIVATE ${UUID_DIR}
     PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/middle-layer>
     PUBLIC $<TARGET_PROPERTY:_qpl,INTERFACE_INCLUDE_DIRECTORIES>
+    PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES>
     PUBLIC $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>
     PUBLIC $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>
     PUBLIC $<TARGET_PROPERTY:core_iaa,INTERFACE_INCLUDE_DIRECTORIES>)
@@ -689,31 +686,54 @@ target_include_directories(middle_layer_lib
 target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB)

 # [SUBDIR]c_api
-file(GLOB_RECURSE QPL_C_API_SRC
-    ${QPL_SRC_DIR}/c_api/*.c
-    ${QPL_SRC_DIR}/c_api/*.cpp)
+file(GLOB QPL_C_API_SRC
+    ${QPL_SRC_DIR}/c_api/compression_operations/*.c
+    ${QPL_SRC_DIR}/c_api/compression_operations/*.cpp
+    ${QPL_SRC_DIR}/c_api/filter_operations/*.cpp
+    ${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c
+    ${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp
+    ${QPL_SRC_DIR}/c_api/other_operations/*.cpp
+    ${QPL_SRC_DIR}/c_api/serialization/*.cpp
+    ${QPL_SRC_DIR}/c_api/*.cpp)
+
+add_library(qpl_c_api OBJECT ${QPL_C_API_SRC})
+
+target_include_directories(qpl_c_api
+    PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api/>
+    PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/include/> $<INSTALL_INTERFACE:include>
+    PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>)
+
+set_target_properties(qpl_c_api PROPERTIES
+    $<$<C_COMPILER_ID:GNU,Clang>:C_STANDARD 17
+    CXX_STANDARD 17)
+
+target_compile_options(qpl_c_api
+    PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
+    PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
+
+target_compile_definitions(qpl_c_api
+    PUBLIC -DQPL_BADARG_CHECK # own_checkers.h
+    PUBLIC -DQPL_LIB # needed for middle_layer_lib
+    PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>) # needed for middle_layer_lib
+
+set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
+    $<TARGET_OBJECTS:qpl_c_api>)

 # Final _qpl target

 get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS)

-add_library(_qpl STATIC ${QPL_C_API_SRC} ${LIB_DEPS})
+add_library(_qpl STATIC ${LIB_DEPS})

 target_include_directories(_qpl
-    PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>
-    PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
-    PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)
+    PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>)

 target_compile_options(_qpl
     PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})

 target_compile_definitions(_qpl
     PRIVATE -DQPL_LIB
     PRIVATE -DQPL_BADARG_CHECK
     PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>
     PUBLIC -DENABLE_QPL_COMPRESSION)

 target_link_libraries(_qpl
-    PRIVATE ch_contrib::accel-config
-    PRIVATE ch_contrib::isal)
+    PRIVATE ch_contrib::accel-config)

 target_include_directories(_qpl SYSTEM BEFORE
     PUBLIC "${QPL_PROJECT_DIR}/include"
contrib/usearch (vendored): 2 changed lines

@@ -1 +1 @@
-Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f
+Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356
@@ -28,12 +28,14 @@ RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_
 RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
 RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
 RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'" >> /etc/environment
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
 ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
 ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
 ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
 ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
+ENV ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'

 # for external_symbolizer_path, and also ensure that llvm-symbolizer really
 # exists (since you don't want to fallback to addr2line, it is very slow)
@@ -193,53 +193,60 @@ function fuzz

 kill -0 $server_pid

+IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
+if [[ "$IS_ASAN" = "1" ]];
+then
+    echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+else
     # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
     # and clickhouse-server can do fork-exec, for example, to run some bridge.
     # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
     # explicitly ignore non-fatal signals that are used by server.
     # Number of SIGRTMIN can be determined only in runtime.
     RTMIN=$(kill -l SIGRTMIN)
     echo "
 set follow-fork-mode parent
 handle SIGHUP nostop noprint pass
 handle SIGINT nostop noprint pass
 handle SIGQUIT nostop noprint pass
 handle SIGPIPE nostop noprint pass
 handle SIGTERM nostop noprint pass
 handle SIGUSR1 nostop noprint pass
 handle SIGUSR2 nostop noprint pass
 handle SIG$RTMIN nostop noprint pass
 info signals
 continue
 backtrace full
 thread apply all backtrace full
 info registers
 disassemble /s
 up
 disassemble /s
 up
 disassemble /s
 p \"done\"
 detach
 quit
 " > script.gdb

     gdb -batch -command script.gdb -p $server_pid &
     sleep 5
     # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
     time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
+fi

 # Check connectivity after we attach gdb, because it might cause the server
 # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
 for _ in {1..180}
 do
     if clickhouse-client --query "select 1"
     then
         break
     fi
     sleep 1
 done
 kill -0 $server_pid # This checks that it is our server that is started and not some other one
 echo 'Server started and responded.'

 setup_logs_replication
@@ -264,8 +271,13 @@ quit
 # The fuzzer_pid belongs to the timeout process.
 actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid")

-echo "Attaching gdb to the fuzzer itself"
-gdb -batch -command script.gdb -p $actual_fuzzer_pid &
+if [[ "$IS_ASAN" = "1" ]];
+then
+    echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+else
+    echo "Attaching gdb to the fuzzer itself"
+    gdb -batch -command script.gdb -p $actual_fuzzer_pid &
+fi

 # Wait for the fuzzer to complete.
 # Note that the 'wait || ...' thing is required so that the script doesn't
@@ -5,47 +5,53 @@ source /utils.lib

 function attach_gdb_to_clickhouse()
 {
+    IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
+    if [[ "$IS_ASAN" = "1" ]];
+    then
+        echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+    else
        # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
        # and clickhouse-server can do fork-exec, for example, to run some bridge.
        # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
        # explicitly ignore non-fatal signals that are used by server.
        # Number of SIGRTMIN can be determined only in runtime.
        RTMIN=$(kill -l SIGRTMIN)
        # shellcheck disable=SC2016
        echo "
 set follow-fork-mode parent
 handle SIGHUP nostop noprint pass
 handle SIGINT nostop noprint pass
 handle SIGQUIT nostop noprint pass
 handle SIGPIPE nostop noprint pass
 handle SIGTERM nostop noprint pass
 handle SIGUSR1 nostop noprint pass
 handle SIGUSR2 nostop noprint pass
 handle SIG$RTMIN nostop noprint pass
 info signals
 continue
 backtrace full
 info registers
 p "top 1 KiB of the stack:"
 p/x *(uint64_t[128]*)"'$sp'"
 maintenance info sections
 thread apply all backtrace full
 disassemble /s
 up
 disassemble /s
 up
 disassemble /s
 p \"done\"
 detach
 quit
 " > script.gdb

        # FIXME Hung check may work incorrectly because of attached gdb
        # We cannot attach another gdb to get stacktraces if some queries hung
        gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
        sleep 5
        # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
        run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'"
+    fi
 }

 # vi: ft=bash
@@ -174,7 +174,7 @@ do
 done

 setup_logs_replication
-attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
+attach_gdb_to_clickhouse

 function fn_exists() {
     declare -F "$1" > /dev/null;
@@ -308,7 +308,8 @@ function collect_query_and_trace_logs()
 {
     for table in query_log trace_log metric_log
     do
-        clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
+        # Don't ignore errors here, it leads to ignore sanitizer reports when running clickhouse-local
+        clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst
     done
 }
@@ -4,4 +4,5 @@ ARG FROM_TAG=latest
 FROM clickhouse/test-base:$FROM_TAG

 COPY run.sh /
-CMD ["/bin/bash", "/run.sh"]
+RUN chmod +x run.sh
+ENTRYPOINT ["/run.sh"]
@@ -1,5 +1,27 @@
 #!/bin/bash

 set -x
 # Need to keep error from tests after `tee`. Otherwise we don't alert on asan errors
 set -o pipefail
 set -e

-timeout 40m gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' | tee test_output/test_result.txt
+if [ "$#" -ne 1 ]; then
+    echo "Expected exactly one argument"
+    exit 1
+fi
+
+if [ "$1" = "GDB" ];
+then
+    timeout 40m \
+        gdb -q -ex "set print inferior-events off" -ex "set confirm off" -ex "set print thread-events off" -ex run -ex bt -ex quit --args \
+        ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
+        | tee test_output/test_result.txt
+elif [ "$1" = "NO_GDB" ];
+then
+    timeout 40m \
+        ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
+        | tee test_output/test_result.txt
+else
+    echo "Unknown argument: $1"
+    exit 1
+fi
@@ -146,6 +146,7 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr
 - `_file` — Name of the file. Type: `LowCardinalty(String)`.
 - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
 - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
+- `_etag` — ETag of the file. Type: `LowCardinalty(String)`. If the etag is unknown, the value is `NULL`.

 For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
@@ -379,7 +379,7 @@ You can mitigate this problem by enabling `wait_end_of_query=1` ([Response Buffe
 However, this does not completely solve the problem because the result must still fit within the `http_response_buffer_size`, and other settings like `send_progress_in_http_headers` can interfere with the delay of the header.
 The only way to catch all errors is to analyze the HTTP body before parsing it using the required format.

-### Queries with Parameters {#cli-queries-with-parameters}
+## Queries with Parameters {#cli-queries-with-parameters}

 You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](../interfaces/cli.md#cli-queries-with-parameters).
@@ -4629,8 +4629,8 @@ Default Value: 5.

 ## memory_overcommit_ratio_denominator {#memory_overcommit_ratio_denominator}

-It represents soft memory limit in case when hard limit is reached on user level.
-This value is used to compute overcommit ratio for the query.
+It represents the soft memory limit when the hard limit is reached on the global level.
+This value is used to compute the overcommit ratio for the query.
 Zero means skip the query.
 Read more about [memory overcommit](memory-overcommit.md).

@@ -4646,8 +4646,8 @@ Default value: `5000000`.

 ## memory_overcommit_ratio_denominator_for_user {#memory_overcommit_ratio_denominator_for_user}

-It represents soft memory limit in case when hard limit is reached on global level.
-This value is used to compute overcommit ratio for the query.
+It represents the soft memory limit when the hard limit is reached on the user level.
+This value is used to compute the overcommit ratio for the query.
 Zero means skip the query.
 Read more about [memory overcommit](memory-overcommit.md).
@@ -5609,6 +5609,18 @@ Minimal size of block to compress in CROSS JOIN. Zero value means - disable this

 Default value: `1GiB`.

+## restore_replace_external_engines_to_null
+
+For testing purposes. Replaces all external engines to Null to not initiate external connections.
+
+Default value: `False`
+
+## restore_replace_external_table_functions_to_null
+
+For testing purposes. Replaces all external table functions to Null to not initiate external connections.
+
+Default value: `False`
+
 ## disable_insertion_and_mutation

 Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries.
@@ -28,39 +28,39 @@ A client application to interact with clickhouse-keeper by its native protocol.
 Connected to ZooKeeper at [::1]:9181 with session_id 137
 / :) ls
 keeper foo bar
-/ :) cd keeper
+/ :) cd 'keeper'
 /keeper :) ls
 api_version
-/keeper :) cd api_version
+/keeper :) cd 'api_version'
 /keeper/api_version :) ls

-/keeper/api_version :) cd xyz
+/keeper/api_version :) cd 'xyz'
 Path /keeper/api_version/xyz does not exist
 /keeper/api_version :) cd ../../
 / :) ls
 keeper foo bar
-/ :) get keeper/api_version
+/ :) get 'keeper/api_version'
 2
 ```

 ## Commands {#clickhouse-keeper-client-commands}

-- `ls [path]` -- Lists the nodes for the given path (default: cwd)
-- `cd [path]` -- Changes the working path (default `.`)
-- `exists <path>` -- Returns `1` if node exists, `0` otherwise
-- `set <path> <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
-- `create <path> <value> [mode]` -- Creates new node with the set value
-- `touch <path>` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
-- `get <path>` -- Returns the node's value
-- `rm <path> [version]` -- Removes the node only if version matches (default: -1)
-- `rmr <path>` -- Recursively deletes path. Confirmation required
+- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
+- `cd '[path]'` -- Changes the working path (default `.`)
+- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
+- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
+- `create '<path>' <value> [mode]` -- Creates new node with the set value
+- `touch '<path>'` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
+- `get '<path>'` -- Returns the node's value
+- `rm '<path>' [version]` -- Removes the node only if version matches (default: -1)
+- `rmr '<path>'` -- Recursively deletes path. Confirmation required
 - `flwc <command>` -- Executes four-letter-word command
 - `help` -- Prints this message
-- `get_direct_children_number [path]` -- Get numbers of direct children nodes under a specific path
-- `get_all_children_number [path]` -- Get all numbers of children nodes under a specific path
-- `get_stat [path]` -- Returns the node's stat (default `.`)
-- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
+- `get_direct_children_number '[path]'` -- Get numbers of direct children nodes under a specific path
+- `get_all_children_number '[path]'` -- Get all numbers of children nodes under a specific path
+- `get_stat '[path]'` -- Returns the node's stat (default `.`)
+- `find_super_nodes <threshold> '[path]'` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
 - `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
 - `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)
-- `sync <path>` -- Synchronizes node between processes and leader
+- `sync '<path>'` -- Synchronizes node between processes and leader
 - `reconfig <add|remove|set> "<arg>" [version]` -- Reconfigure Keeper cluster. See https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#reconfiguration
@@ -10,7 +10,7 @@ Calculates a concatenated string from a group of strings, optionally separated b
 **Syntax**

 ``` sql
-groupConcat(expression [, delimiter] [, limit]);
+groupConcat[(delimiter [, limit])](expression);
 ```

 **Arguments**
@@ -20,7 +20,7 @@ groupConcat(expression [, delimiter] [, limit]);
 - `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.

 :::note
-If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
+If delimiter is specified without limit, it must be the first parameter. If both delimiter and limit are specified, delimiter must precede limit.
 :::

 **Returned value**
@@ -61,7 +61,7 @@ This concatenates all names into one continuous string without any separator.
 Query:

 ``` sql
-SELECT groupConcat(Name, ', ', 2) FROM Employees;
+SELECT groupConcat(', ')(Name) FROM Employees;
 ```

 Result:
@@ -78,7 +78,7 @@ This output shows the names separated by a comma followed by a space.
 Query:

 ``` sql
-SELECT groupConcat(Name, ', ', 2) FROM Employees;
+SELECT groupConcat(', ', 2)(Name) FROM Employees;
 ```

 Result:
@@ -241,12 +241,12 @@ CREATE OR REPLACE TABLE test
 (
     id UInt64,
     size_bytes Int64,
-    size String Alias formatReadableSize(size_bytes)
+    size String ALIAS formatReadableSize(size_bytes)
 )
 ENGINE = MergeTree
 ORDER BY id;

-INSERT INTO test Values (1, 4678899);
+INSERT INTO test VALUES (1, 4678899);

 SELECT id, size_bytes, size FROM test;
 ┌─id─┬─size_bytes─┬─size─────┐
@@ -497,7 +497,7 @@ If you perform a SELECT query mentioning a specific value in an encrypted column
 ```sql
 CREATE TABLE mytable
 (
-    x String Codec(AES_128_GCM_SIV)
+    x String CODEC(AES_128_GCM_SIV)
 )
 ENGINE = MergeTree ORDER BY x;
 ```
@@ -36,9 +36,10 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/

 ## Limitations of lightweight `DELETE`

-### Lightweight `DELETE`s do not work with projections
+### Lightweight `DELETE`s with projections

-Currently, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance.
+By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance.
+However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'`, deletes will work with projections.

 ## Performance considerations when using lightweight `DELETE`
@@ -421,7 +421,7 @@ try
         std::lock_guard lock(servers_lock);
         metrics.reserve(servers->size());
         for (const auto & server : *servers)
-            metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
+            metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
         return metrics;
     }
     );
@ -14,6 +14,7 @@
|
||||
#include <Databases/registerDatabases.h>
|
||||
#include <Databases/DatabaseFilesystem.h>
|
||||
#include <Databases/DatabaseMemory.h>
|
||||
#include <Databases/DatabaseAtomic.h>
|
||||
#include <Databases/DatabasesOverlay.h>
|
||||
#include <Storages/System/attachSystemTables.h>
|
||||
#include <Storages/System/attachInformationSchemaTables.h>
|
||||
@ -50,7 +51,6 @@
|
||||
#include <Dictionaries/registerDictionaries.h>
|
||||
#include <Disks/registerDisks.h>
|
||||
#include <Formats/registerFormats.h>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <boost/program_options/options_description.hpp>
|
||||
#include <base/argsToConfig.h>
|
||||
#include <filesystem>
|
||||
@ -216,12 +216,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
|
||||
return system_database;
|
||||
}
|
||||
|
||||
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
|
||||
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
|
||||
{
|
||||
auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
|
||||
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
|
||||
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
|
||||
return databaseCombiner;
|
||||
auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
|
||||
overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
|
||||
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
|
||||
return overlay;
|
||||
}
|
||||
|
||||
/// If path is specified and not empty, will try to setup server environment and load existing metadata
|
||||
@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery()
|
||||
else
|
||||
table_structure = "(" + table_structure + ")";
|
||||
|
||||
return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});",
|
||||
return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});",
|
||||
table_name, table_structure, data_format, table_file);
|
||||
}
|
||||
|
||||
@ -761,7 +761,12 @@ void LocalServer::processConfig()
|
||||
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
|
||||
|
||||
std::string default_database = server_settings.default_database;
|
||||
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
|
||||
{
|
||||
DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
|
||||
if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
|
||||
DatabaseCatalog::instance().addUUIDMapping(uuid);
|
||||
DatabaseCatalog::instance().attachDatabase(default_database, database);
|
||||
}
|
||||
global_context->setCurrentDatabase(default_database);
|
||||
|
||||
if (getClientConfiguration().has("path"))
|
||||
|
@ -918,10 +918,10 @@ try
|
||||
metrics.reserve(servers_to_start_before_tables.size() + servers.size());
|
||||
|
||||
for (const auto & server : servers_to_start_before_tables)
|
||||
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
|
||||
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
|
||||
|
||||
for (const auto & server : servers)
|
||||
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
|
||||
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
|
||||
return metrics;
|
||||
}
|
||||
);
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Common/assert_cast.h>
|
||||
#include <Common/FieldVisitorToString.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <DataTypes/DataTypeDateTime64.h>
|
||||
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
@ -162,6 +163,7 @@ QueryTreeNodePtr ConstantNode::cloneImpl() const
|
||||
ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
{
|
||||
const auto & constant_value_literal = constant_value->getValue();
|
||||
const auto & constant_value_type = constant_value->getType();
|
||||
auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
|
||||
|
||||
if (!options.add_cast_for_constants)
|
||||
@ -169,7 +171,25 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
|
||||
if (requiresCastCall())
|
||||
{
|
||||
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value->getType()->getName());
|
||||
/** Value for DateTime64 is Decimal64, which is serialized as a string literal.
|
||||
* If we serialize it as is, DateTime64 would be parsed from that string literal, which can be incorrect.
|
||||
* For example, DateTime64 cannot be parsed from the short value, like '1', while it's a valid Decimal64 value.
|
||||
* It could also lead to ambiguous parsing because we don't know if the string literal represents a date or a Decimal64 literal.
|
||||
* For this reason, we use a string literal representing a date instead of a Decimal64 literal.
|
||||
*/
|
||||
if (WhichDataType(constant_value_type->getTypeId()).isDateTime64())
|
||||
{
|
||||
const auto * date_time_type = typeid_cast<const DataTypeDateTime64 *>(constant_value_type.get());
|
||||
DecimalField<Decimal64> decimal_value;
|
||||
if (constant_value_literal.tryGet<DecimalField<Decimal64>>(decimal_value))
|
||||
{
|
||||
WriteBufferFromOwnString ostr;
|
||||
writeDateTimeText(decimal_value.getValue(), date_time_type->getScale(), ostr, date_time_type->getTimeZone());
|
||||
constant_value_ast = std::make_shared<ASTLiteral>(ostr.str());
|
||||
}
|
||||
}
|
||||
|
||||
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value_type->getName());
|
||||
return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast));
|
||||
}
|
||||
|
||||
|
@ -46,7 +46,7 @@ public:
|
||||
return;
|
||||
|
||||
const auto & storage_snapshot = table_node ? table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot();
|
||||
if (!storage->isVirtualColumn(column.name, storage_snapshot->getMetadataForQuery()))
|
||||
if (!storage->isVirtualColumn(column.name, storage_snapshot->metadata))
|
||||
return;
|
||||
|
||||
auto function_node = std::make_shared<FunctionNode>("shardNum");
|
||||
|
@ -1613,7 +1613,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
|
||||
#endif
|
||||
|
||||
{
|
||||
auto get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
|
||||
auto threads_get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
|
||||
{
|
||||
static std::map<String, std::pair<const char *, const char *>> metric_map =
|
||||
{
|
||||
@ -1637,11 +1637,38 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
|
||||
return it->second;
|
||||
};
|
||||
|
||||
auto rejected_connections_get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
|
||||
{
|
||||
static std::map<String, std::pair<const char *, const char *>> metric_map =
|
||||
{
|
||||
{"tcp_port", {"TCPRejectedConnections", "Number of rejected connections for the TCP protocol (without TLS)."}},
|
||||
{"tcp_port_secure", {"TCPSecureRejectedConnections", "Number of rejected connections for the TCP protocol (with TLS)."}},
|
||||
{"http_port", {"HTTPRejectedConnections", "Number of rejected connections for the HTTP interface (without TLS)."}},
|
||||
{"https_port", {"HTTPSecureRejectedConnections", "Number of rejected connections for the HTTPS interface."}},
|
||||
{"interserver_http_port", {"InterserverRejectedConnections", "Number of rejected connections for the replicas communication protocol (without TLS)."}},
|
||||
{"interserver_https_port", {"InterserverSecureRejectedConnections", "Number of rejected connections for the replicas communication protocol (with TLS)."}},
|
||||
{"mysql_port", {"MySQLRejectedConnections", "Number of rejected connections for the MySQL compatibility protocol."}},
|
||||
{"postgresql_port", {"PostgreSQLRejectedConnections", "Number of rejected connections for the PostgreSQL compatibility protocol."}},
|
||||
{"grpc_port", {"GRPCRejectedConnections", "Number of rejected connections for the GRPC protocol."}},
|
||||
{"prometheus.port", {"PrometheusRejectedConnections", "Number of rejected connections for the Prometheus endpoint. Note: prometheus endpoints can be also used via the usual HTTP/HTTPs ports."}},
|
||||
{"keeper_server.tcp_port", {"KeeperTCPRejectedConnections", "Number of rejected connections for the Keeper TCP protocol (without TLS)."}},
|
||||
{"keeper_server.tcp_port_secure", {"KeeperTCPSecureRejectedConnections", "Number of rejected connections for the Keeper TCP protocol (with TLS)."}}
|
||||
};
|
||||
auto it = metric_map.find(name);
|
||||
if (it == metric_map.end())
|
||||
return { nullptr, nullptr };
|
||||
else
|
||||
return it->second;
|
||||
};
|
||||
|
||||
const auto server_metrics = protocol_server_metrics_func();
|
||||
for (const auto & server_metric : server_metrics)
|
||||
{
|
||||
if (auto name_doc = get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr)
|
||||
if (auto name_doc = threads_get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr)
|
||||
new_values[name_doc.first] = { server_metric.current_threads, name_doc.second };
|
||||
|
||||
if (auto name_doc = rejected_connections_get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr)
|
||||
new_values[name_doc.first] = { server_metric.rejected_connections, name_doc.second };
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -42,6 +42,7 @@ struct ProtocolServerMetrics
|
||||
{
|
||||
String port_name;
|
||||
size_t current_threads;
|
||||
size_t rejected_connections;
|
||||
};
|
||||
|
||||
/** Periodically (by default, each second)
|
||||
|
@ -243,7 +243,7 @@ public:
|
||||
}
|
||||
|
||||
/// Clear and finish queue
|
||||
void clearAndFinish()
|
||||
void clearAndFinish() noexcept
|
||||
{
|
||||
{
|
||||
std::lock_guard lock(queue_mutex);
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <Poco/ErrorHandler.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
|
||||
/** ErrorHandler for Poco::Thread,
|
||||
@ -26,8 +27,32 @@ public:
|
||||
void exception(const std::exception &) override { logException(); }
|
||||
void exception() override { logException(); }
|
||||
|
||||
void logMessageImpl(Poco::Message::Priority priority, const std::string & msg) override
|
||||
{
|
||||
switch (priority)
|
||||
{
|
||||
case Poco::Message::PRIO_FATAL: [[fallthrough]];
|
||||
case Poco::Message::PRIO_CRITICAL:
|
||||
LOG_FATAL(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_ERROR:
|
||||
LOG_ERROR(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_WARNING:
|
||||
LOG_WARNING(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_NOTICE: [[fallthrough]];
|
||||
case Poco::Message::PRIO_INFORMATION:
|
||||
LOG_INFO(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_DEBUG:
|
||||
LOG_DEBUG(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_TRACE:
|
||||
LOG_TRACE(trace_log, fmt::runtime(msg)); break;
|
||||
case Poco::Message::PRIO_TEST:
|
||||
LOG_TEST(trace_log, fmt::runtime(msg)); break;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
LoggerPtr log = getLogger("ServerErrorHandler");
|
||||
LoggerPtr trace_log = getLogger("Poco");
|
||||
|
||||
void logException()
|
||||
{
|
||||
|
@ -8,7 +8,9 @@ using namespace DB;
|
||||
|
||||
using ResourceTest = ResourceTestClass;
|
||||
|
||||
TEST(SchedulerFairPolicy, Factory)
|
||||
/// Tests disabled because of leaks in the tests themselves: https://github.com/ClickHouse/ClickHouse/issues/67678
|
||||
|
||||
TEST(DISABLED_SchedulerFairPolicy, Factory)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -17,7 +19,7 @@ TEST(SchedulerFairPolicy, Factory)
|
||||
EXPECT_TRUE(dynamic_cast<FairPolicy *>(fair.get()) != nullptr);
|
||||
}
|
||||
|
||||
TEST(SchedulerFairPolicy, FairnessWeights)
|
||||
TEST(DISABLED_SchedulerFairPolicy, FairnessWeights)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -41,7 +43,7 @@ TEST(SchedulerFairPolicy, FairnessWeights)
|
||||
t.consumed("B", 20);
|
||||
}
|
||||
|
||||
TEST(SchedulerFairPolicy, Activation)
|
||||
TEST(DISABLED_SchedulerFairPolicy, Activation)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -77,7 +79,7 @@ TEST(SchedulerFairPolicy, Activation)
|
||||
t.consumed("B", 10);
|
||||
}
|
||||
|
||||
TEST(SchedulerFairPolicy, FairnessMaxMin)
|
||||
TEST(DISABLED_SchedulerFairPolicy, FairnessMaxMin)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -101,7 +103,7 @@ TEST(SchedulerFairPolicy, FairnessMaxMin)
|
||||
t.consumed("A", 20);
|
||||
}
|
||||
|
||||
TEST(SchedulerFairPolicy, HierarchicalFairness)
|
||||
TEST(DISABLED_SchedulerFairPolicy, HierarchicalFairness)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
|
@ -8,7 +8,9 @@ using namespace DB;
|
||||
|
||||
using ResourceTest = ResourceTestClass;
|
||||
|
||||
TEST(SchedulerPriorityPolicy, Factory)
|
||||
/// Tests disabled because of leaks in the tests themselves: https://github.com/ClickHouse/ClickHouse/issues/67678
|
||||
|
||||
TEST(DISABLED_SchedulerPriorityPolicy, Factory)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -17,7 +19,7 @@ TEST(SchedulerPriorityPolicy, Factory)
|
||||
EXPECT_TRUE(dynamic_cast<PriorityPolicy *>(prio.get()) != nullptr);
|
||||
}
|
||||
|
||||
TEST(SchedulerPriorityPolicy, Priorities)
|
||||
TEST(DISABLED_SchedulerPriorityPolicy, Priorities)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -51,7 +53,7 @@ TEST(SchedulerPriorityPolicy, Priorities)
|
||||
t.consumed("C", 0);
|
||||
}
|
||||
|
||||
TEST(SchedulerPriorityPolicy, Activation)
|
||||
TEST(DISABLED_SchedulerPriorityPolicy, Activation)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
@ -92,7 +94,7 @@ TEST(SchedulerPriorityPolicy, Activation)
|
||||
t.consumed("C", 0);
|
||||
}
|
||||
|
||||
TEST(SchedulerPriorityPolicy, SinglePriority)
|
||||
TEST(DISABLED_SchedulerPriorityPolicy, SinglePriority)
|
||||
{
|
||||
ResourceTest t;
|
||||
|
||||
|
@ -10,7 +10,9 @@ using namespace DB;
|
||||
|
||||
using ResourceTest = ResourceTestClass;
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, LeakyBucketConstraint)
|
||||
/// Tests disabled because of leaks in the tests themselves: https://github.com/ClickHouse/ClickHouse/issues/67678
|
||||
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, LeakyBucketConstraint)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
@ -40,7 +42,7 @@ TEST(SchedulerThrottlerConstraint, LeakyBucketConstraint)
|
||||
t.consumed("A", 10);
|
||||
}
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, Unlimited)
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, Unlimited)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
@ -57,7 +59,7 @@ TEST(SchedulerThrottlerConstraint, Unlimited)
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, Pacing)
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, Pacing)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
@ -77,7 +79,7 @@ TEST(SchedulerThrottlerConstraint, Pacing)
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, BucketFilling)
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, BucketFilling)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
@ -111,7 +113,7 @@ TEST(SchedulerThrottlerConstraint, BucketFilling)
|
||||
t.consumed("A", 3);
|
||||
}
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, PeekAndAvgLimits)
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, PeekAndAvgLimits)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
@ -139,7 +141,7 @@ TEST(SchedulerThrottlerConstraint, PeekAndAvgLimits)
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SchedulerThrottlerConstraint, ThrottlerAndFairness)
|
||||
TEST(DISABLED_SchedulerThrottlerConstraint, ThrottlerAndFairness)
|
||||
{
|
||||
ResourceTest t;
|
||||
EventQueue::TimePoint start = std::chrono::system_clock::now();
|
||||
|
@ -14,20 +14,21 @@
|
||||
/// because of broken getauxval() [1].
|
||||
///
|
||||
/// [1]: https://github.com/ClickHouse/ClickHouse/pull/33957
|
||||
TEST(Common, LSan)
|
||||
TEST(SanitizerDeathTest, LSan)
|
||||
{
|
||||
int sanitizers_exit_code = 1;
|
||||
|
||||
ASSERT_EXIT({
|
||||
std::thread leak_in_thread([]()
|
||||
EXPECT_DEATH(
|
||||
{
|
||||
void * leak = malloc(4096);
|
||||
ASSERT_NE(leak, nullptr);
|
||||
});
|
||||
leak_in_thread.join();
|
||||
std::thread leak_in_thread(
|
||||
[]()
|
||||
{
|
||||
void * leak = malloc(4096);
|
||||
ASSERT_NE(leak, nullptr);
|
||||
});
|
||||
leak_in_thread.join();
|
||||
|
||||
__lsan_do_leak_check();
|
||||
}, ::testing::ExitedWithCode(sanitizers_exit_code), ".*LeakSanitizer: detected memory leaks.*");
|
||||
__lsan_do_leak_check();
|
||||
},
|
||||
".*LeakSanitizer: detected memory leaks.*");
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -5,19 +5,19 @@
|
||||
# If you want really small size of the resulted binary, just link with fuzz_compression and clickhouse_common_io
|
||||
|
||||
clickhouse_add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp)
|
||||
target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms)
|
||||
target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms clickhouse_functions)
|
||||
|
||||
clickhouse_add_executable (lz4_decompress_fuzzer lz4_decompress_fuzzer.cpp)
|
||||
target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms ch_contrib::lz4)
|
||||
target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms ch_contrib::lz4 clickhouse_functions)
|
||||
|
||||
clickhouse_add_executable (delta_decompress_fuzzer delta_decompress_fuzzer.cpp)
|
||||
target_link_libraries (delta_decompress_fuzzer PRIVATE dbms)
|
||||
target_link_libraries (delta_decompress_fuzzer PRIVATE dbms clickhouse_functions)
|
||||
|
||||
clickhouse_add_executable (double_delta_decompress_fuzzer double_delta_decompress_fuzzer.cpp)
|
||||
target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms)
|
||||
target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms clickhouse_functions)
|
||||
|
||||
clickhouse_add_executable (encrypted_decompress_fuzzer encrypted_decompress_fuzzer.cpp)
|
||||
target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms)
|
||||
target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms clickhouse_functions)
|
||||
|
||||
clickhouse_add_executable (gcd_decompress_fuzzer gcd_decompress_fuzzer.cpp)
|
||||
target_link_libraries (gcd_decompress_fuzzer PRIVATE dbms)
|
||||
target_link_libraries (gcd_decompress_fuzzer PRIVATE dbms clickhouse_functions)
|
||||
|
@ -893,6 +893,8 @@ class IColumn;
|
||||
M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
|
||||
M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
|
||||
M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \
|
||||
M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \
|
||||
M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \
|
||||
\
|
||||
\
|
||||
/* ###################################### */ \
|
||||
|
@ -75,6 +75,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
|
||||
},
|
||||
{"24.8",
|
||||
{
|
||||
{"restore_replace_external_table_functions_to_null", false, false, "New setting."},
|
||||
{"restore_replace_external_engines_to_null", false, false, "New setting."},
|
||||
{"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."},
|
||||
{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"},
|
||||
{"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},
|
||||
|
@ -177,6 +177,11 @@ IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUME
|
||||
{{"throw", LightweightMutationProjectionMode::THROW},
|
||||
{"drop", LightweightMutationProjectionMode::DROP}})
|
||||
|
||||
IMPLEMENT_SETTING_ENUM(DeduplicateMergeProjectionMode, ErrorCodes::BAD_ARGUMENTS,
|
||||
{{"throw", DeduplicateMergeProjectionMode::THROW},
|
||||
{"drop", DeduplicateMergeProjectionMode::DROP},
|
||||
{"rebuild", DeduplicateMergeProjectionMode::REBUILD}})
|
||||
|
||||
IMPLEMENT_SETTING_AUTO_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS)
|
||||
|
||||
IMPLEMENT_SETTING_ENUM(ParquetVersion, ErrorCodes::BAD_ARGUMENTS,
|
||||
|
@ -315,6 +315,15 @@ enum class LightweightMutationProjectionMode : uint8_t
|
||||
|
||||
DECLARE_SETTING_ENUM(LightweightMutationProjectionMode)
|
||||
|
||||
enum class DeduplicateMergeProjectionMode : uint8_t
|
||||
{
|
||||
THROW,
|
||||
DROP,
|
||||
REBUILD,
|
||||
};
|
||||
|
||||
DECLARE_SETTING_ENUM(DeduplicateMergeProjectionMode)
|
||||
|
||||
DECLARE_SETTING_ENUM(LocalFSReadMethod)
|
||||
|
||||
enum class ObjectStorageQueueMode : uint8_t
|
||||
|
@ -33,6 +33,16 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
DataTypeAggregateFunction::DataTypeAggregateFunction(AggregateFunctionPtr function_, const DataTypes & argument_types_,
|
||||
const Array & parameters_, std::optional<size_t> version_)
|
||||
: function(std::move(function_))
|
||||
, argument_types(argument_types_)
|
||||
, parameters(parameters_)
|
||||
, version(version_)
|
||||
{
|
||||
}
|
||||
|
||||
String DataTypeAggregateFunction::getFunctionName() const
|
||||
{
|
||||
return function->getName();
|
||||
|
@ -30,13 +30,7 @@ public:
|
||||
static constexpr bool is_parametric = true;
|
||||
|
||||
DataTypeAggregateFunction(AggregateFunctionPtr function_, const DataTypes & argument_types_,
|
||||
const Array & parameters_, std::optional<size_t> version_ = std::nullopt)
|
||||
: function(std::move(function_))
|
||||
, argument_types(argument_types_)
|
||||
, parameters(parameters_)
|
||||
, version(version_)
|
||||
{
|
||||
}
|
||||
const Array & parameters_, std::optional<size_t> version_ = std::nullopt);
|
||||
|
||||
size_t getVersion() const;
|
||||
|
||||
|
@ -90,7 +90,9 @@ void IDataType::forEachSubcolumn(
|
||||
{
|
||||
auto name = ISerialization::getSubcolumnNameForStream(subpath, prefix_len);
|
||||
auto subdata = ISerialization::createFromPath(subpath, prefix_len);
|
||||
callback(subpath, name, subdata);
|
||||
auto path_copy = subpath;
|
||||
path_copy.resize(prefix_len);
|
||||
callback(path_copy, name, subdata);
|
||||
}
|
||||
subpath[i].visited = true;
|
||||
}
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <DataTypes/DataTypeNothing.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeMap.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeNested.h>
|
||||
@ -66,6 +67,36 @@ DataTypePtr getBaseTypeOfArray(const DataTypePtr & type)
|
||||
return last_array ? last_array->getNestedType() : type;
|
||||
}
|
||||
|
||||
DataTypePtr getBaseTypeOfArray(DataTypePtr type, const Names & tuple_elements)
|
||||
{
|
||||
auto it = tuple_elements.begin();
|
||||
while (true)
|
||||
{
|
||||
if (const auto * type_array = typeid_cast<const DataTypeArray *>(type.get()))
|
||||
{
|
||||
type = type_array->getNestedType();
|
||||
}
|
||||
else if (const auto * type_tuple = typeid_cast<const DataTypeTuple *>(type.get()))
|
||||
{
|
||||
if (it == tuple_elements.end())
|
||||
break;
|
||||
|
||||
auto pos = type_tuple->tryGetPositionByName(*it);
|
||||
if (!pos)
|
||||
break;
|
||||
|
||||
++it;
|
||||
type = type_tuple->getElement(*pos);
|
||||
}
|
||||
else
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
ColumnPtr getBaseColumnOfArray(const ColumnPtr & column)
|
||||
{
|
||||
/// Get raw pointers to avoid extra copying of column pointers.
|
||||
|
@ -27,6 +27,9 @@ size_t getNumberOfDimensions(const IColumn & column);
|
||||
/// Returns type of scalars of Array of arbitrary dimensions.
|
||||
DataTypePtr getBaseTypeOfArray(const DataTypePtr & type);
|
||||
|
||||
/// The same as above but takes into account Tuples of Nested.
|
||||
DataTypePtr getBaseTypeOfArray(DataTypePtr type, const Names & tuple_elements);
|
||||
|
||||
/// Returns Array type with requested scalar type and number of dimensions.
|
||||
DataTypePtr createArrayOfType(DataTypePtr type, size_t num_dimensions);
|
||||
|
||||
|
@ -195,7 +195,7 @@ public:
|
||||
/// Types of substreams that can have arbitrary name.
|
||||
static const std::set<Type> named_types;
|
||||
|
||||
Type type;
|
||||
Type type = Type::Regular;
|
||||
|
||||
/// The name of a variant element type.
|
||||
String variant_element_name;
|
||||
@ -212,6 +212,7 @@ public:
|
||||
/// Flag, that may help to traverse substream paths.
|
||||
mutable bool visited = false;
|
||||
|
||||
Substream() = default;
|
||||
Substream(Type type_) : type(type_) {} /// NOLINT
|
||||
String toString() const;
|
||||
};
|
||||
|
@ -53,9 +53,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c
|
||||
, db_uuid(uuid)
|
||||
{
|
||||
assert(db_uuid != UUIDHelpers::Nil);
|
||||
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
|
||||
fs::create_directories(path_to_table_symlinks);
|
||||
tryCreateMetadataSymlink();
|
||||
}
|
||||
|
||||
DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_)
|
||||
@ -63,6 +60,16 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C
|
||||
{
|
||||
}
|
||||
|
||||
void DatabaseAtomic::createDirectories()
|
||||
{
|
||||
if (database_atomic_directories_created.test_and_set())
|
||||
return;
|
||||
DatabaseOnDisk::createDirectories();
|
||||
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
|
||||
fs::create_directories(path_to_table_symlinks);
|
||||
tryCreateMetadataSymlink();
|
||||
}
|
||||
|
||||
String DatabaseAtomic::getTableDataPath(const String & table_name) const
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
@ -99,6 +106,7 @@ void DatabaseAtomic::drop(ContextPtr)
|
||||
void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path)
|
||||
{
|
||||
assert(relative_table_path != data_path && !relative_table_path.empty());
|
||||
createDirectories();
|
||||
DetachedTables not_in_use;
|
||||
std::lock_guard lock(mutex);
|
||||
not_in_use = cleanupDetachedTables();
|
||||
@ -200,11 +208,15 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
if (exchange && !supportsAtomicRename())
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported");
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
auto & other_db = dynamic_cast<DatabaseAtomic &>(to_database);
|
||||
bool inside_database = this == &other_db;
|
||||
|
||||
if (!inside_database)
|
||||
other_db.createDirectories();
|
||||
|
||||
String old_metadata_path = getObjectMetadataPath(table_name);
|
||||
String new_metadata_path = to_database.getObjectMetadataPath(to_table_name);
|
||||
|
||||
@ -325,6 +337,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
|
||||
const String & table_metadata_tmp_path, const String & table_metadata_path,
|
||||
ContextPtr query_context)
|
||||
{
|
||||
createDirectories();
|
||||
DetachedTables not_in_use;
|
||||
auto table_data_path = getTableDataPath(query);
|
||||
try
|
||||
@ -461,6 +474,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin
|
||||
if (mode < LoadingStrictnessLevel::FORCE_RESTORE)
|
||||
return;
|
||||
|
||||
if (!fs::exists(path_to_table_symlinks))
|
||||
return;
|
||||
|
||||
/// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken
|
||||
for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks))
|
||||
{
|
||||
@ -588,6 +604,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new
|
||||
{
|
||||
/// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
bool check_ref_deps = query_context->getSettingsRef().check_referential_table_dependencies;
|
||||
@ -679,4 +696,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory)
|
||||
};
|
||||
factory.registerDatabase("Atomic", create_fn);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -76,6 +76,9 @@ protected:
|
||||
using DetachedTables = std::unordered_map<UUID, StoragePtr>;
|
||||
[[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex);
|
||||
|
||||
std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT;
|
||||
void createDirectories();
|
||||
|
||||
void tryCreateMetadataSymlink();
|
||||
|
||||
virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; }
|
||||
|
@ -47,12 +47,13 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_,
|
||||
: DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_)
|
||||
, expiration_time(expiration_time_)
|
||||
{
|
||||
createDirectories();
|
||||
}
|
||||
|
||||
|
||||
void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, LoadingStrictnessLevel /*mode*/)
|
||||
{
|
||||
iterateMetadataFiles(local_context, [this, &local_context](const String & file_name)
|
||||
iterateMetadataFiles([this, &local_context](const String & file_name)
|
||||
{
|
||||
const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4));
|
||||
|
||||
|
@ -12,7 +12,7 @@ class DatabaseLazyIterator;
|
||||
class Context;
|
||||
|
||||
/** Lazy engine of databases.
|
||||
* Works like DatabaseOrdinary, but stores in memory only the cache.
|
||||
* Works like DatabaseOrdinary, but stores only recently accessed tables in memory.
|
||||
* Can be used only with *Log engines.
|
||||
*/
|
||||
class DatabaseLazy final : public DatabaseOnDisk
|
||||
|
@ -172,7 +172,14 @@ DatabaseOnDisk::DatabaseOnDisk(
|
||||
, metadata_path(metadata_path_)
|
||||
, data_path(data_path_)
|
||||
{
|
||||
fs::create_directories(local_context->getPath() + data_path);
|
||||
}
|
||||
|
||||
|
||||
void DatabaseOnDisk::createDirectories()
|
||||
{
|
||||
if (directories_created.test_and_set())
|
||||
return;
|
||||
fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path);
|
||||
fs::create_directories(metadata_path);
|
||||
}
|
||||
|
||||
@ -190,6 +197,8 @@ void DatabaseOnDisk::createTable(
|
||||
const StoragePtr & table,
|
||||
const ASTPtr & query)
|
||||
{
|
||||
createDirectories();
|
||||
|
||||
const auto & settings = local_context->getSettingsRef();
|
||||
const auto & create = query->as<ASTCreateQuery &>();
|
||||
assert(table_name == create.getTable());
|
||||
@ -257,7 +266,6 @@ void DatabaseOnDisk::createTable(
|
||||
}
|
||||
|
||||
commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context);
|
||||
|
||||
removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false);
|
||||
}
|
||||
|
||||
@ -285,6 +293,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
|
||||
{
|
||||
try
|
||||
{
|
||||
createDirectories();
|
||||
|
||||
/// Add a table to the map of known tables.
|
||||
attachTable(query_context, query.getTable(), table, getTableDataPath(query));
|
||||
|
||||
@ -420,6 +430,7 @@ void DatabaseOnDisk::renameTable(
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported");
|
||||
}
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
auto table_data_relative_path = getTableDataPath(table_name);
|
||||
@ -568,14 +579,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context)
|
||||
assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty());
|
||||
if (local_context->getSettingsRef().force_remove_data_recursively_on_drop)
|
||||
{
|
||||
(void)fs::remove_all(local_context->getPath() + getDataPath());
|
||||
(void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path);
|
||||
(void)fs::remove_all(getMetadataPath());
|
||||
}
|
||||
else
|
||||
{
|
||||
try
|
||||
{
|
||||
(void)fs::remove(local_context->getPath() + getDataPath());
|
||||
(void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path);
|
||||
(void)fs::remove(getMetadataPath());
|
||||
}
|
||||
catch (const fs::filesystem_error & e)
|
||||
@ -613,15 +624,18 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n
|
||||
}
|
||||
}
|
||||
|
||||
void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const
|
||||
void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const
|
||||
{
|
||||
if (!fs::exists(metadata_path))
|
||||
return;
|
||||
|
||||
auto process_tmp_drop_metadata_file = [&](const String & file_name)
|
||||
{
|
||||
assert(getUUID() == UUIDHelpers::Nil);
|
||||
static const char * tmp_drop_ext = ".sql.tmp_drop";
|
||||
const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext));
|
||||
|
||||
if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name))
|
||||
if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name))
|
||||
{
|
||||
fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql");
|
||||
LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name));
|
||||
@ -638,7 +652,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat
|
||||
std::vector<std::pair<String, bool>> metadata_files;
|
||||
|
||||
fs::directory_iterator dir_end;
|
||||
for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it)
|
||||
for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it)
|
||||
{
|
||||
String file_name = dir_it->path().filename();
|
||||
/// For '.svn', '.gitignore' directory and similar.
|
||||
|
@ -64,7 +64,7 @@ public:
|
||||
time_t getObjectMetadataModificationTime(const String & object_name) const override;
|
||||
|
||||
String getDataPath() const override { return data_path; }
|
||||
String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
|
||||
String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; }
|
||||
String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }
|
||||
String getMetadataPath() const override { return metadata_path; }
|
||||
|
||||
@ -83,7 +83,7 @@ protected:
|
||||
|
||||
using IteratingFunction = std::function<void(const String &)>;
|
||||
|
||||
void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const;
|
||||
void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const;
|
||||
|
||||
ASTPtr getCreateTableQueryImpl(
|
||||
const String & table_name,
|
||||
@ -99,6 +99,9 @@ protected:
|
||||
virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach);
|
||||
virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {}
|
||||
|
||||
std::atomic_flag directories_created = ATOMIC_FLAG_INIT;
|
||||
void createDirectories();
|
||||
|
||||
const String metadata_path;
|
||||
const String data_path;
|
||||
};
|
||||
|
@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768;
|
||||
static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated";
|
||||
|
||||
DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_)
|
||||
: DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_)
|
||||
: DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
|
||||
}
|
||||
};
|
||||
|
||||
iterateMetadataFiles(local_context, process_metadata);
|
||||
iterateMetadataFiles(process_metadata);
|
||||
|
||||
size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count;
|
||||
size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries;
|
||||
|
@ -14,6 +14,8 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int CANNOT_GET_CREATE_TABLE_QUERY;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int UNKNOWN_TABLE;
|
||||
}
|
||||
|
||||
DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_)
|
||||
@ -124,6 +126,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab
|
||||
getEngineName());
|
||||
}
|
||||
|
||||
void DatabasesOverlay::renameTable(
|
||||
ContextPtr current_context,
|
||||
const String & name,
|
||||
IDatabase & to_database,
|
||||
const String & to_name,
|
||||
bool exchange,
|
||||
bool dictionary)
|
||||
{
|
||||
for (auto & db : databases)
|
||||
{
|
||||
if (db->isTableExist(name, current_context))
|
||||
{
|
||||
if (DatabasesOverlay * to_overlay_database = typeid_cast<DatabasesOverlay *>(&to_database))
|
||||
{
|
||||
/// Renaming from Overlay database inside itself or into another Overlay database.
|
||||
/// Just use the first database in the overlay as a destination.
|
||||
if (to_overlay_database->databases.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName());
|
||||
|
||||
db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database.
|
||||
db->renameTable(current_context, name, to_database, to_name, exchange, dictionary);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name));
|
||||
}
|
||||
|
||||
ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const
|
||||
{
|
||||
ASTPtr result = nullptr;
|
||||
@ -178,6 +213,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const
|
||||
return result;
|
||||
}
|
||||
|
||||
UUID DatabasesOverlay::getUUID() const
|
||||
{
|
||||
UUID result = UUIDHelpers::Nil;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->getUUID();
|
||||
if (result != UUIDHelpers::Nil)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const
|
||||
{
|
||||
UUID result = UUIDHelpers::Nil;
|
||||
|
@ -35,12 +35,21 @@ public:
|
||||
|
||||
StoragePtr detachTable(ContextPtr context, const String & table_name) override;
|
||||
|
||||
void renameTable(
|
||||
ContextPtr current_context,
|
||||
const String & name,
|
||||
IDatabase & to_database,
|
||||
const String & to_name,
|
||||
bool exchange,
|
||||
bool dictionary) override;
|
||||
|
||||
ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override;
|
||||
ASTPtr getCreateDatabaseQuery() const override;
|
||||
|
||||
String getTableDataPath(const String & table_name) const override;
|
||||
String getTableDataPath(const ASTCreateQuery & query) const override;
|
||||
|
||||
UUID getUUID() const override;
|
||||
UUID tryGetTableUUID(const String & table_name) const override;
|
||||
|
||||
void drop(ContextPtr context) override;
|
||||
|
@ -416,6 +416,7 @@ public:
|
||||
std::lock_guard lock{mutex};
|
||||
return database_name;
|
||||
}
|
||||
|
||||
/// Get UUID of database.
|
||||
virtual UUID getUUID() const { return UUIDHelpers::Nil; }
|
||||
|
||||
|
@ -46,6 +46,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL(
|
||||
, settings(std::move(settings_))
|
||||
, materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get())
|
||||
{
|
||||
createDirectories();
|
||||
}
|
||||
|
||||
void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const
|
||||
|
@ -86,6 +86,7 @@ private:
|
||||
Poco::Timestamp::fromEpochTime(
|
||||
std::chrono::duration_cast<std::chrono::seconds>(
|
||||
static_cast<std::chrono::system_clock::time_point>(blob.Details.LastModified).time_since_epoch()).count()),
|
||||
blob.Details.ETag.ToString(),
|
||||
{}}));
|
||||
}
|
||||
|
||||
@ -186,6 +187,7 @@ void AzureObjectStorage::listObjects(const std::string & path, RelativePathsWith
|
||||
Poco::Timestamp::fromEpochTime(
|
||||
std::chrono::duration_cast<std::chrono::seconds>(
|
||||
static_cast<std::chrono::system_clock::time_point>(blob.Details.LastModified).time_since_epoch()).count()),
|
||||
blob.Details.ETag.ToString(),
|
||||
{}}));
|
||||
}
|
||||
|
||||
|
@ -205,7 +205,7 @@ void DiskObjectStorageMetadata::addObject(ObjectStorageKey key, size_t size)
|
||||
}
|
||||
|
||||
total_size += size;
|
||||
keys_with_meta.emplace_back(std::move(key), ObjectMetadata{size, {}, {}});
|
||||
keys_with_meta.emplace_back(std::move(key), ObjectMetadata{size, {}, {}, {}});
|
||||
}
|
||||
|
||||
ObjectKeyWithMetadata DiskObjectStorageMetadata::popLastObject()
|
||||
|
@ -222,6 +222,7 @@ void HDFSObjectStorage::listObjects(const std::string & path, RelativePathsWithM
|
||||
ObjectMetadata{
|
||||
static_cast<uint64_t>(ls.file_info[i].mSize),
|
||||
Poco::Timestamp::fromEpochTime(ls.file_info[i].mLastMod),
|
||||
"",
|
||||
{}}));
|
||||
}
|
||||
|
||||
|
@ -54,6 +54,7 @@ struct ObjectMetadata
|
||||
{
|
||||
uint64_t size_bytes = 0;
|
||||
Poco::Timestamp last_modified;
|
||||
std::string etag;
|
||||
ObjectAttributes attributes;
|
||||
};
|
||||
|
||||
|
@ -146,7 +146,7 @@ private:
|
||||
auto objects = outcome.GetResult().GetContents();
|
||||
for (const auto & object : objects)
|
||||
{
|
||||
ObjectMetadata metadata{static_cast<uint64_t>(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), {}};
|
||||
ObjectMetadata metadata{static_cast<uint64_t>(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), object.GetETag(), {}};
|
||||
batch.emplace_back(std::make_shared<RelativePathWithMetadata>(object.GetKey(), std::move(metadata)));
|
||||
}
|
||||
|
||||
@ -332,6 +332,7 @@ void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMet
|
||||
ObjectMetadata{
|
||||
static_cast<uint64_t>(object.GetSize()),
|
||||
Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()),
|
||||
object.GetETag(),
|
||||
{}}));
|
||||
|
||||
if (max_keys)
|
||||
@ -479,6 +480,7 @@ ObjectMetadata S3ObjectStorage::getObjectMetadata(const std::string & path) cons
|
||||
ObjectMetadata result;
|
||||
result.size_bytes = object_info.size;
|
||||
result.last_modified = Poco::Timestamp::fromEpochTime(object_info.last_modification_time);
|
||||
result.etag = object_info.etag;
|
||||
result.attributes = object_info.metadata;
|
||||
|
||||
return result;
|
||||
|
@ -54,6 +54,7 @@ namespace
|
||||
ObjectInfo object_info;
|
||||
object_info.size = static_cast<size_t>(result.GetContentLength());
|
||||
object_info.last_modification_time = result.GetLastModified().Seconds();
|
||||
object_info.etag = result.GetETag();
|
||||
|
||||
if (with_metadata)
|
||||
object_info.metadata = result.GetMetadata();
|
||||
|
@ -15,6 +15,7 @@ struct ObjectInfo
|
||||
{
|
||||
size_t size = 0;
|
||||
time_t last_modification_time = 0;
|
||||
String etag;
|
||||
|
||||
std::map<String, String> metadata = {}; /// Set only if getObjectInfo() is called with `with_metadata = true`.
|
||||
};
|
||||
|
@ -3307,6 +3307,17 @@ void NO_INLINE Aggregator::destroyImpl(Table & table) const
|
||||
|
||||
data = nullptr;
|
||||
});
|
||||
|
||||
if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
|
||||
{
|
||||
if (table.getNullKeyData() != nullptr)
|
||||
{
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
aggregate_functions[i]->destroy(table.getNullKeyData() + offsets_of_aggregate_states[i]);
|
||||
|
||||
table.getNullKeyData() = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -648,10 +648,8 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
|
||||
return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
|
||||
}
|
||||
|
||||
void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join)
|
||||
void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_optimize)
|
||||
{
|
||||
if (shrink_blocks)
|
||||
return; /// Already shrunk
|
||||
|
||||
Int64 current_memory_usage = getCurrentQueryMemoryUsage();
|
||||
Int64 query_memory_usage_delta = current_memory_usage - memory_usage_before_adding_blocks;
|
||||
@ -659,15 +657,21 @@ void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join)
|
||||
|
||||
auto max_total_bytes_in_join = table_join->sizeLimits().max_bytes;
|
||||
|
||||
/** If accounted data size is more than half of `max_bytes_in_join`
|
||||
* or query memory consumption growth from the beginning of adding blocks (estimation of memory consumed by join using memory tracker)
|
||||
* is bigger than half of all memory available for query,
|
||||
* then shrink stored blocks to fit.
|
||||
*/
|
||||
shrink_blocks = (max_total_bytes_in_join && total_bytes_in_join > max_total_bytes_in_join / 2) ||
|
||||
(max_total_bytes_for_query && query_memory_usage_delta > max_total_bytes_for_query / 2);
|
||||
if (!shrink_blocks)
|
||||
return;
|
||||
if (!force_optimize)
|
||||
{
|
||||
if (shrink_blocks)
|
||||
return; /// Already shrunk
|
||||
|
||||
/** If accounted data size is more than half of `max_bytes_in_join`
|
||||
* or query memory consumption growth from the beginning of adding blocks (estimation of memory consumed by join using memory tracker)
|
||||
* is bigger than half of all memory available for query,
|
||||
* then shrink stored blocks to fit.
|
||||
*/
|
||||
shrink_blocks = (max_total_bytes_in_join && total_bytes_in_join > max_total_bytes_in_join / 2) ||
|
||||
(max_total_bytes_for_query && query_memory_usage_delta > max_total_bytes_for_query / 2);
|
||||
if (!shrink_blocks)
|
||||
return;
|
||||
}
|
||||
|
||||
LOG_DEBUG(log, "Shrinking stored blocks, memory consumption is {} {} calculated by join, {} {} by memory tracker",
|
||||
ReadableSize(total_bytes_in_join), max_total_bytes_in_join ? fmt::format("/ {}", ReadableSize(max_total_bytes_in_join)) : "",
|
||||
|
@ -372,7 +372,7 @@ public:
|
||||
|
||||
void debugKeys() const;
|
||||
|
||||
void shrinkStoredBlocksToFit(size_t & total_bytes_in_join);
|
||||
void shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_optimize = false);
|
||||
|
||||
void setMaxJoinedBlockRows(size_t value) { max_joined_block_rows = value; }
|
||||
|
||||
|
@ -951,12 +951,40 @@ namespace
|
||||
engine_ast->no_empty_args = true;
|
||||
storage.set(storage.engine, engine_ast);
|
||||
}
|
||||
|
||||
void setNullTableEngine(ASTStorage & storage)
|
||||
{
|
||||
auto engine_ast = std::make_shared<ASTFunction>();
|
||||
engine_ast->name = "Null";
|
||||
engine_ast->no_empty_args = true;
|
||||
storage.set(storage.engine, engine_ast);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
|
||||
{
|
||||
if (create.as_table_function)
|
||||
{
|
||||
if (getContext()->getSettingsRef().restore_replace_external_table_functions_to_null)
|
||||
{
|
||||
const auto & factory = TableFunctionFactory::instance();
|
||||
|
||||
auto properties = factory.tryGetProperties(create.as_table_function->as<ASTFunction>()->name);
|
||||
if (properties && properties->allow_readonly)
|
||||
return;
|
||||
if (!create.storage)
|
||||
{
|
||||
auto storage_ast = std::make_shared<ASTStorage>();
|
||||
create.set(create.storage, storage_ast);
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage should not be created yet, it's a bug.");
|
||||
create.as_table_function = nullptr;
|
||||
setNullTableEngine(*create.storage);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view)
|
||||
return;
|
||||
@ -1007,6 +1035,13 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
|
||||
/// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one.
|
||||
setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
|
||||
}
|
||||
/// For external tables with the restore_replace_external_engines_to_null setting we replace external engines with
|
||||
/// the Null table engine.
|
||||
else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null)
|
||||
{
|
||||
if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE)
|
||||
setNullTableEngine(*create.storage);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput;
|
||||
class ASTTableIdentifier;
|
||||
class Context;
|
||||
|
||||
// TODO(ilezhankin): refactor and merge |ASTTableIdentifier|
|
||||
struct StorageID
|
||||
{
|
||||
String database_name;
|
||||
|
@ -1158,7 +1158,8 @@ bool TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
|
||||
}
|
||||
}
|
||||
|
||||
has_virtual_shard_num = is_remote_storage && storage->isVirtualColumn("_shard_num", storage_snapshot->getMetadataForQuery()) && virtuals->has("_shard_num");
|
||||
has_virtual_shard_num
|
||||
= is_remote_storage && storage->isVirtualColumn("_shard_num", storage_snapshot->metadata) && virtuals->has("_shard_num");
|
||||
}
|
||||
|
||||
/// Collect missed object subcolumns
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
#include <DataTypes/NestedUtils.h>
|
||||
#include <Columns/ColumnArray.h>
|
||||
#include <Columns/ColumnConst.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <Storages/StorageInMemoryMetadata.h>
|
||||
|
||||
@ -35,8 +36,13 @@ namespace
|
||||
|
||||
/// Add all required expressions for missing columns calculation
|
||||
void addDefaultRequiredExpressionsRecursively(
|
||||
const Block & block, const String & required_column_name, DataTypePtr required_column_type,
|
||||
const ColumnsDescription & columns, ASTPtr default_expr_list_accum, NameSet & added_columns, bool null_as_default)
|
||||
const Block & block,
|
||||
const String & required_column_name,
|
||||
DataTypePtr required_column_type,
|
||||
const ColumnsDescription & columns,
|
||||
ASTPtr default_expr_list_accum,
|
||||
NameSet & added_columns,
|
||||
bool null_as_default)
|
||||
{
|
||||
checkStackSize();
|
||||
|
||||
@ -271,6 +277,53 @@ static std::unordered_map<String, ColumnPtr> collectOffsetsColumns(
|
||||
return offsets_columns;
|
||||
}
|
||||
|
||||
static ColumnPtr createColumnWithDefaultValue(const IDataType & data_type, const String & subcolumn_name, size_t num_rows)
{
    auto column = data_type.createColumnConstWithDefaultValue(num_rows);

    /// We must turn a constant column into a full column because the interpreter could infer
    /// that it is constant everywhere but in some blocks (from other parts) it can be a full column.

    if (subcolumn_name.empty())
        return column->convertToFullColumnIfConst();

    /// Firstly get subcolumn from const column and then replicate.
    column = assert_cast<const ColumnConst &>(*column).getDataColumnPtr();
    column = data_type.getSubcolumn(subcolumn_name, column);

    return ColumnConst::create(std::move(column), num_rows)->convertToFullColumnIfConst();
}

static bool hasDefault(const StorageMetadataPtr & metadata_snapshot, const NameAndTypePair & column)
{
    if (!metadata_snapshot)
        return false;

    const auto & columns = metadata_snapshot->getColumns();
    if (columns.has(column.name))
        return columns.hasDefault(column.name);

    auto name_in_storage = column.getNameInStorage();
    return columns.hasDefault(name_in_storage);
}

static String removeTupleElementsFromSubcolumn(String subcolumn_name, const Names & tuple_elements)
{
    /// Add a dot to the end of name for convenience.
    subcolumn_name += ".";
    for (const auto & elem : tuple_elements)
    {
        auto pos = subcolumn_name.find(elem + ".");
        if (pos != std::string::npos)
            subcolumn_name.erase(pos, elem.size() + 1);
    }

    if (subcolumn_name.ends_with("."))
        subcolumn_name.pop_back();

    return subcolumn_name;
}

void fillMissingColumns(
    Columns & res_columns,
    size_t num_rows,
@ -296,21 +349,17 @@ void fillMissingColumns(
    auto requested_column = requested_columns.begin();
    for (size_t i = 0; i < num_columns; ++i, ++requested_column)
    {
        const auto & [name, type] = *requested_column;

        if (res_columns[i] && partially_read_columns.contains(name))
        if (res_columns[i] && partially_read_columns.contains(requested_column->name))
            res_columns[i] = nullptr;

        if (res_columns[i])
            continue;

        if (metadata_snapshot && metadata_snapshot->getColumns().hasDefault(name))
        /// Nothing to fill or default should be filled in evaluateMissingDefaults
        if (res_columns[i] || hasDefault(metadata_snapshot, *requested_column))
            continue;

        std::vector<ColumnPtr> current_offsets;
        size_t num_dimensions = 0;

        const auto * array_type = typeid_cast<const DataTypeArray *>(type.get());
        const auto * array_type = typeid_cast<const DataTypeArray *>(requested_column->type.get());
        if (array_type && !offsets_columns.empty())
        {
            num_dimensions = getNumberOfDimensions(*array_type);
@ -345,20 +394,34 @@ void fillMissingColumns(

        if (!current_offsets.empty())
        {
            size_t num_empty_dimensions = num_dimensions - current_offsets.size();
            auto scalar_type = createArrayOfType(getBaseTypeOfArray(type), num_empty_dimensions);
            Names tuple_elements;
            auto serialization = IDataType::getSerialization(*requested_column);

            /// For Nested columns collect names of tuple elements and skip them while getting the base type of array.
            IDataType::forEachSubcolumn([&](const auto & path, const auto &, const auto &)
            {
                if (path.back().type == ISerialization::Substream::TupleElement)
                    tuple_elements.push_back(path.back().name_of_substream);
            }, ISerialization::SubstreamData(serialization));

            /// The number of dimensions that belongs to the array itself but not shared in Nested column.
            /// For example for column "n Nested(a UInt64, b Array(UInt64))" this value is 0 for `n.a` and 1 for `n.b`.
            size_t num_empty_dimensions = num_dimensions - current_offsets.size();

            auto base_type = getBaseTypeOfArray(requested_column->getTypeInStorage(), tuple_elements);
            auto scalar_type = createArrayOfType(base_type, num_empty_dimensions);
            size_t data_size = assert_cast<const ColumnUInt64 &>(*current_offsets.back()).getData().back();
            res_columns[i] = scalar_type->createColumnConstWithDefaultValue(data_size)->convertToFullColumnIfConst();

            /// Remove names of tuple elements because they are already processed by 'getBaseTypeOfArray'.
            auto subcolumn_name = removeTupleElementsFromSubcolumn(requested_column->getSubcolumnName(), tuple_elements);
            res_columns[i] = createColumnWithDefaultValue(*scalar_type, subcolumn_name, data_size);

            for (auto it = current_offsets.rbegin(); it != current_offsets.rend(); ++it)
                res_columns[i] = ColumnArray::create(res_columns[i], *it);
        }
        else
        {
            /// We must turn a constant column into a full column because the interpreter could infer
            /// that it is constant everywhere but in some blocks (from other parts) it can be a full column.
            res_columns[i] = type->createColumnConstWithDefaultValue(num_rows)->convertToFullColumnIfConst();
            res_columns[i] = createColumnWithDefaultValue(*requested_column->getTypeInStorage(), requested_column->getSubcolumnName(), num_rows);
        }
    }
}

@ -417,20 +417,20 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info
        /// We evaluate sampling for Merge lazily so we need to get all the columns
        if (storage_snapshot->storage.getName() == "Merge")
        {
            const auto columns = storage_snapshot->getMetadataForQuery()->getColumns().getAll();
            const auto columns = storage_snapshot->metadata->getColumns().getAll();
            for (const auto & column : columns)
                required_columns.insert(column.name);
        }
        else
        {
            auto columns_required_for_sampling = storage_snapshot->getMetadataForQuery()->getColumnsRequiredForSampling();
            auto columns_required_for_sampling = storage_snapshot->metadata->getColumnsRequiredForSampling();
            required_columns.insert(columns_required_for_sampling.begin(), columns_required_for_sampling.end());
        }
    }

    if (table_expression_modifiers->hasFinal())
    {
        auto columns_required_for_final = storage_snapshot->getMetadataForQuery()->getColumnsRequiredForFinal();
        auto columns_required_for_final = storage_snapshot->metadata->getColumnsRequiredForFinal();
        required_columns.insert(columns_required_for_final.begin(), columns_required_for_final.end());
    }
}

@ -97,7 +97,9 @@ void CompletedPipelineExecutor::execute()
                break;

            if (is_cancelled_callback())
            {
                data->executor->cancel();
            }
        }

        if (data->has_exception)
@ -116,7 +118,9 @@ CompletedPipelineExecutor::~CompletedPipelineExecutor()
    try
    {
        if (data && data->executor)
        {
            data->executor->cancel();
        }
    }
    catch (...)
    {

@ -32,7 +32,7 @@ public:
private:
    Chunk read() override;

    void onCancel() override
    void onCancel() noexcept override
    {
        is_stopped = 1;
    }

@ -32,7 +32,7 @@ public:
protected:
    Chunk read() override;

    void onCancel() override
    void onCancel() noexcept override
    {
        is_stopped = 1;
    }

@ -64,7 +64,7 @@ public:
protected:
    Chunk read() override;

    void onCancel() override { is_stopped = 1; }
    void onCancel() noexcept override { is_stopped = 1; }

private:
    void prepareFileReader();

@ -34,7 +34,7 @@ public:
protected:
    Chunk read() override;

    void onCancel() override
    void onCancel() noexcept override
    {
        is_stopped = 1;
    }

@ -96,7 +96,7 @@ namespace DB
}


void ParallelFormattingOutputFormat::finishAndWait()
void ParallelFormattingOutputFormat::finishAndWait() noexcept
{
    emergency_stop = true;

@ -122,7 +122,7 @@ public:
        started_prefix = true;
    }

    void onCancel() override
    void onCancel() noexcept override
    {
        finishAndWait();
    }
@ -268,7 +268,7 @@ private:
    bool collected_suffix = false;
    bool collected_finalize = false;

    void finishAndWait();
    void finishAndWait() noexcept;

    void onBackgroundException()
    {

@ -137,7 +137,7 @@ private:

    Chunk read() final;

    void onCancel() final
    void onCancel() noexcept final
    {
        /*
         * The format parsers themselves are not being cancelled here, so we'll
@ -292,7 +292,7 @@ private:
        first_parser_finished.wait();
    }

    void finishAndWait()
    void finishAndWait() noexcept
    {
        /// Defending concurrent segmentator thread join
        std::lock_guard finish_and_wait_lock(finish_and_wait_mutex);

@ -68,7 +68,7 @@ public:
private:
    Chunk read() override;

    void onCancel() override
    void onCancel() noexcept override
    {
        is_stopped = 1;
    }

@ -270,7 +270,7 @@ void ParquetBlockOutputFormat::resetFormatterImpl()
    staging_bytes = 0;
}

void ParquetBlockOutputFormat::onCancel()
void ParquetBlockOutputFormat::onCancel() noexcept
{
    is_stopped = true;
}

@ -112,7 +112,7 @@ private:
    void consume(Chunk) override;
    void finalizeImpl() override;
    void resetFormatterImpl() override;
    void onCancel() override;
    void onCancel() noexcept override;

    void writeRowGroup(std::vector<Chunk> chunks);
    void writeUsingArrow(std::vector<Chunk> chunks);

@ -65,7 +65,7 @@ public:
private:
    Chunk read() override;

    void onCancel() override
    void onCancel() noexcept override
    {
        is_stopped = 1;
    }

@ -29,7 +29,7 @@ public:

    void setRowsBeforeLimit(size_t rows_before_limit) override;

    void onCancel() override
    void onCancel() noexcept override
    {
        queue.clearAndFinish();
    }

@ -9,7 +9,7 @@
namespace DB
{

void IProcessor::cancel()
void IProcessor::cancel() noexcept
{

    bool already_cancelled = is_cancelled.exchange(true, std::memory_order_acq_rel);

@ -255,7 +255,7 @@ public:
    /// In case if query was cancelled executor will wait till all processors finish their jobs.
    /// Generally, there is no reason to check this flag. However, it may be reasonable for long operations (e.g. i/o).
    bool isCancelled() const { return is_cancelled.load(std::memory_order_acquire); }
    void cancel();
    void cancel() noexcept;

    /// Additional method which is called in case if ports were updated while work() method.
    /// May be used to stop execution in rare cases.
@ -380,7 +380,7 @@ public:
    virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {}

protected:
    virtual void onCancel() {}
    virtual void onCancel() noexcept {}

    std::atomic<bool> is_cancelled{false};

@ -757,9 +757,7 @@ std::optional<String> optimizeUseAggregateProjections(QueryPlan::Node & node, Qu
    else
    {
        auto storage_snapshot = reading->getStorageSnapshot();
        auto proj_snapshot = std::make_shared<StorageSnapshot>(storage_snapshot->storage, storage_snapshot->metadata);
        proj_snapshot->addProjection(best_candidate->projection);

        auto proj_snapshot = std::make_shared<StorageSnapshot>(storage_snapshot->storage, best_candidate->projection->metadata);
        auto projection_query_info = query_info;
        projection_query_info.prewhere_info = nullptr;
        projection_query_info.filter_actions_dag = nullptr;

@ -193,9 +193,7 @@ std::optional<String> optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod
    }

    auto storage_snapshot = reading->getStorageSnapshot();
    auto proj_snapshot = std::make_shared<StorageSnapshot>(storage_snapshot->storage, storage_snapshot->metadata);
    proj_snapshot->addProjection(best_candidate->projection);

    auto proj_snapshot = std::make_shared<StorageSnapshot>(storage_snapshot->storage, best_candidate->projection->metadata);
    auto query_info_copy = query_info;
    query_info_copy.prewhere_info = nullptr;

@ -285,7 +285,6 @@ ReadFromMergeTree::ReadFromMergeTree(
    , all_column_names(std::move(all_column_names_))
    , data(data_)
    , actions_settings(ExpressionActionsSettings::fromContext(context_))
    , metadata_for_reading(storage_snapshot->getMetadataForQuery())
    , block_size{
          .max_block_size_rows = max_block_size_,
          .preferred_block_size_bytes = context->getSettingsRef().preferred_block_size_bytes,
@ -327,7 +326,7 @@ ReadFromMergeTree::ReadFromMergeTree(

    updateSortDescriptionForOutputStream(
        *output_stream,
        storage_snapshot->getMetadataForQuery()->getSortingKeyColumns(),
        storage_snapshot->metadata->getSortingKeyColumns(),
        getSortDirection(),
        query_info.input_order_info,
        prewhere_info,
@ -782,7 +781,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_
    Names in_order_column_names_to_read(column_names);

    /// Add columns needed to calculate the sorting expression
    for (const auto & column_name : metadata_for_reading->getColumnsRequiredForSortingKey())
    for (const auto & column_name : storage_snapshot->metadata->getColumnsRequiredForSortingKey())
    {
        if (column_names_set.contains(column_name))
            continue;
@ -802,10 +801,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_
            info.use_uncompressed_cache);
    };

    auto sorting_expr = metadata_for_reading->getSortingKey().expression;
    auto sorting_expr = storage_snapshot->metadata->getSortingKey().expression;

    SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey(
        metadata_for_reading->getPrimaryKey(),
        storage_snapshot->metadata->getPrimaryKey(),
        std::move(sorting_expr),
        std::move(parts_with_ranges),
        num_streams,
@ -883,7 +882,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder(
    if (prewhere_info)
    {
        NameSet sorting_columns;
        for (const auto & column : metadata_for_reading->getSortingKey().expression->getRequiredColumnsWithTypes())
        for (const auto & column : storage_snapshot->metadata->getSortingKey().expression->getRequiredColumnsWithTypes())
            sorting_columns.insert(column.name);

        have_input_columns_removed_after_prewhere = restorePrewhereInputs(*prewhere_info, sorting_columns);
@ -1038,12 +1037,12 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder(
    if (need_preliminary_merge || output_each_partition_through_separate_port)
    {
        size_t prefix_size = input_order_info->used_prefix_of_sorting_key_size;
        auto order_key_prefix_ast = metadata_for_reading->getSortingKey().expression_list_ast->clone();
        auto order_key_prefix_ast = storage_snapshot->metadata->getSortingKey().expression_list_ast->clone();
        order_key_prefix_ast->children.resize(prefix_size);

        auto syntax_result = TreeRewriter(context).analyze(order_key_prefix_ast, metadata_for_reading->getColumns().getAllPhysical());
        auto syntax_result = TreeRewriter(context).analyze(order_key_prefix_ast, storage_snapshot->metadata->getColumns().getAllPhysical());
        auto sorting_key_prefix_expr = ExpressionAnalyzer(order_key_prefix_ast, syntax_result, context).getActionsDAG(false);
        const auto & sorting_columns = metadata_for_reading->getSortingKey().column_names;
        const auto & sorting_columns = storage_snapshot->metadata->getSortingKey().column_names;

        SortDescription sort_description;
        sort_description.compile_sort_description = settings.compile_sort_description;
@ -1150,7 +1149,7 @@ bool ReadFromMergeTree::doNotMergePartsAcrossPartitionsFinal() const
    if (settings.do_not_merge_across_partitions_select_final.changed)
        return settings.do_not_merge_across_partitions_select_final;

    if (!metadata_for_reading->hasPrimaryKey() || !metadata_for_reading->hasPartitionKey())
    if (!storage_snapshot->metadata->hasPrimaryKey() || !storage_snapshot->metadata->hasPartitionKey())
        return false;

    /** To avoid merging parts across partitions we want result of partition key expression for
@ -1160,11 +1159,11 @@ bool ReadFromMergeTree::doNotMergePartsAcrossPartitionsFinal() const
      * in primary key, then for same primary key column values, result of partition key expression
      * will be the same.
      */
    const auto & partition_key_expression = metadata_for_reading->getPartitionKey().expression;
    const auto & partition_key_expression = storage_snapshot->metadata->getPartitionKey().expression;
    if (partition_key_expression->getActionsDAG().hasNonDeterministic())
        return false;

    const auto & primary_key_columns = metadata_for_reading->getPrimaryKey().column_names;
    const auto & primary_key_columns = storage_snapshot->metadata->getPrimaryKey().column_names;
    NameSet primary_key_columns_set(primary_key_columns.begin(), primary_key_columns.end());

    const auto & partition_key_required_columns = partition_key_expression->getRequiredColumns();
@ -1217,12 +1216,12 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
    /// we will store lonely parts with level > 0 to use parallel select on them.
    RangesInDataParts non_intersecting_parts_by_primary_key;

    auto sorting_expr = metadata_for_reading->getSortingKey().expression;
    auto sorting_expr = storage_snapshot->metadata->getSortingKey().expression;

    if (prewhere_info)
    {
        NameSet sorting_columns;
        for (const auto & column : metadata_for_reading->getSortingKey().expression->getRequiredColumnsWithTypes())
        for (const auto & column : storage_snapshot->metadata->getSortingKey().expression->getRequiredColumnsWithTypes())
            sorting_columns.insert(column.name);
        restorePrewhereInputs(*prewhere_info, sorting_columns);
    }
@ -1253,7 +1252,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
        if (new_parts.empty())
            continue;

        if (num_streams > 1 && metadata_for_reading->hasPrimaryKey())
        if (num_streams > 1 && storage_snapshot->metadata->hasPrimaryKey())
        {
            // Let's split parts into non intersecting parts ranges and layers to ensure data parallelism of FINAL.
            auto in_order_reading_step_getter = [this, &column_names, &info](auto parts)
@ -1273,7 +1272,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
                data.merging_params.is_deleted_column.empty() && !reader_settings.read_in_order;

            SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey(
                metadata_for_reading->getPrimaryKey(),
                storage_snapshot->metadata->getPrimaryKey(),
                sorting_expr,
                std::move(new_parts),
                num_streams,
@ -1305,7 +1304,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
        if (pipes.empty())
            continue;

        Names sort_columns = metadata_for_reading->getSortingKeyColumns();
        Names sort_columns = storage_snapshot->metadata->getSortingKeyColumns();
        SortDescription sort_description;
        sort_description.compile_sort_description = settings.compile_sort_description;
        sort_description.min_count_to_compile_sort_description = settings.min_count_to_compile_sort_description;
@ -1313,7 +1312,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
        size_t sort_columns_size = sort_columns.size();
        sort_description.reserve(sort_columns_size);

        Names partition_key_columns = metadata_for_reading->getPartitionKey().column_names;
        Names partition_key_columns = storage_snapshot->metadata->getPartitionKey().column_names;

        for (size_t i = 0; i < sort_columns_size; ++i)
            sort_description.emplace_back(sort_columns[i], 1, 1);
@ -1370,7 +1369,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
    return selectRangesToRead(
        std::move(parts),
        std::move(alter_conversions),
        metadata_for_reading,
        storage_snapshot->metadata,
        query_info,
        context,
        requested_num_streams,
@ -1534,7 +1533,7 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes)
            prepared_parts,
            context,
            query_info,
            metadata_for_reading);
            storage_snapshot->metadata);
    }
}

@ -1703,7 +1702,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction,

    /// update sort info for output stream
    SortDescription sort_description;
    const Names & sorting_key_columns = metadata_for_reading->getSortingKeyColumns();
    const Names & sorting_key_columns = storage_snapshot->metadata->getSortingKeyColumns();
    const Block & header = output_stream->header;
    const int sort_direction = getSortDirection();
    for (const auto & column_name : sorting_key_columns)
@ -1745,7 +1744,7 @@ void ReadFromMergeTree::updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info

    updateSortDescriptionForOutputStream(
        *output_stream,
        storage_snapshot->getMetadataForQuery()->getSortingKeyColumns(),
        storage_snapshot->metadata->getSortingKeyColumns(),
        getSortDirection(),
        query_info.input_order_info,
        prewhere_info,
@ -1871,7 +1870,7 @@ Pipe ReadFromMergeTree::spreadMarkRanges(
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Optimization isn't supposed to be used for queries with final");

    /// Add columns needed to calculate the sorting expression and the sign.
    for (const auto & column : metadata_for_reading->getColumnsRequiredForSortingKey())
    for (const auto & column : storage_snapshot->metadata->getColumnsRequiredForSortingKey())
    {
        if (!names.contains(column))
        {
@ -1965,10 +1964,6 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons
                fmt::format("{}.{}", data.getStorageID().getFullNameNotQuoted(), part.data_part->info.partition_id));
        }
        context->getQueryContext()->addQueryAccessInfo(partition_names);

        if (storage_snapshot->projection)
            context->getQueryContext()->addQueryAccessInfo(
                Context::QualifiedProjectionName{.storage_id = data.getStorageID(), .projection_name = storage_snapshot->projection->name});
    }

    ProfileEvents::increment(ProfileEvents::SelectedParts, result.selected_parts);

@ -171,7 +171,7 @@ public:

    AnalysisResultPtr selectRangesToRead(bool find_exact_ranges = false) const;

    StorageMetadataPtr getStorageMetadata() const { return metadata_for_reading; }
    StorageMetadataPtr getStorageMetadata() const { return storage_snapshot->metadata; }

    /// Returns `false` if requested reading cannot be performed.
    bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit);
@ -216,8 +216,6 @@ private:
    const MergeTreeData & data;
    ExpressionActionsSettings actions_settings;

    StorageMetadataPtr metadata_for_reading;

    const MergeTreeReadTask::BlockSizeParams block_size;

    size_t requested_num_streams;

@ -4,6 +4,8 @@
#include <QueryPipeline/StreamLocalLimits.h>
#include <Processors/Transforms/AggregatingTransform.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <Common/Exception.h>
#include <Common/Logger.h>

namespace DB
{
@ -202,9 +204,16 @@ std::optional<Chunk> RemoteSource::tryGenerate()
    return chunk;
}

void RemoteSource::onCancel()
void RemoteSource::onCancel() noexcept
{
    query_executor->cancel();
    try
    {
        query_executor->cancel();
    }
    catch (...)
    {
        tryLogCurrentException(getLogger("RemoteSource"), "Error occurs on cancellation.");
    }
}

void RemoteSource::onUpdatePorts()

@ -38,7 +38,7 @@ public:

protected:
    std::optional<Chunk> tryGenerate() override;
    void onCancel() override;
    void onCancel() noexcept override;

private:
    bool was_query_sent = false;

@ -367,7 +367,7 @@ public:
        return prepareTwoLevel();
    }

    void onCancel() override
    void onCancel() noexcept override
    {
        shared_data->is_cancelled.store(true, std::memory_order_seq_cst);
    }

@ -52,7 +52,7 @@ protected:
    virtual void onConsume(Chunk chunk) = 0;
    virtual GenerateResult onGenerate() = 0;
    virtual void onFinish() {}
    virtual void onException(std::exception_ptr /* exception */) {}
    virtual void onException(std::exception_ptr /* exception */) { }

public:
    ExceptionKeepingTransform(const Block & in_header, const Block & out_header, bool ignore_on_start_and_finish_ = true);

@ -20,6 +20,7 @@ public:
    UInt16 portNumber() const override { return tcp_server->portNumber(); }
    size_t currentConnections() const override { return tcp_server->currentConnections(); }
    size_t currentThreads() const override { return tcp_server->currentThreads(); }
    size_t refusedConnections() const override { return tcp_server->refusedConnections(); }

private:
    std::unique_ptr<TCPServer> tcp_server;
@ -54,6 +55,7 @@ public:
    UInt16 portNumber() const override { return grpc_server->portNumber(); }
    size_t currentConnections() const override { return grpc_server->currentConnections(); }
    size_t currentThreads() const override { return grpc_server->currentThreads(); }
    size_t refusedConnections() const override { return 0; }

private:
    std::unique_ptr<GRPCServer> grpc_server;

@ -38,6 +38,8 @@ public:
    /// Returns the number of currently handled connections.
    size_t currentConnections() const { return impl->currentConnections(); }

    size_t refusedConnections() const { return impl->refusedConnections(); }

    /// Returns the number of current threads.
    size_t currentThreads() const { return impl->currentThreads(); }

@ -61,6 +63,7 @@ private:
        virtual UInt16 portNumber() const = 0;
        virtual size_t currentConnections() const = 0;
        virtual size_t currentThreads() const = 0;
        virtual size_t refusedConnections() const = 0;
    };
    class TCPServerAdapterImpl;
    class GRPCServerAdapterImpl;

@ -365,9 +365,6 @@ void TCPHandler::runImpl()

        try
        {
            /// If a user passed query-local timeouts, reset socket to initial state at the end of the query
            SCOPE_EXIT({state.timeout_setter.reset();});

            /** If Query - process it. If Ping or Cancel - go back to the beginning.
              * There may come settings for a separate query that modify `query_context`.
              * It's possible to receive part uuids packet before the query, so then receivePacket has to be called twice.
@ -397,7 +394,8 @@ void TCPHandler::runImpl()
            /// So it's better to update the connection settings for flexibility.
            extractConnectionSettingsFromContext(query_context);

            /// Sync timeouts on client and server during current query to avoid dangling queries on server
            /// Sync timeouts on client and server during current query to avoid dangling queries on server.
            /// It should be reset at the end of query.
            state.timeout_setter = std::make_unique<TimeoutSetter>(socket(), send_timeout, receive_timeout);

            /// Should we send internal logs to client?
@ -558,7 +556,9 @@ void TCPHandler::runImpl()
                std::scoped_lock lock(out_mutex, task_callback_mutex);

                if (getQueryCancellationStatus() == CancellationStatus::FULLY_CANCELLED)
                {
                    return true;
                }

                sendProgress();
                sendSelectProfileEvents();
@ -605,6 +605,7 @@ void TCPHandler::runImpl()
            /// QueryState should be cleared before QueryScope, since otherwise
            /// the MemoryTracker will be wrong for possible deallocations.
            /// (i.e. deallocations from the Aggregator with two-level aggregation)
            /// Also it resets socket's timeouts.
            state.reset();
            last_sent_snapshots = ProfileEvents::ThreadIdToCountersSnapshot{};
            query_scope.reset();
@ -630,6 +631,9 @@ void TCPHandler::runImpl()
            state.io.onException();
            exception.reset(e.clone());

            /// In case of exception state was not reset, so socket's timeouts must be reset explicitly
            state.timeout_setter.reset();

            if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT)
                throw;

@ -688,6 +692,9 @@ void TCPHandler::runImpl()
            exception = std::make_unique<DB::Exception>(Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception"));
        }

        /// In case of exception state was not reset, so socket's timeouts must be reset explicitly
        state.timeout_setter.reset();

        try
        {
            if (exception)

@ -19,6 +19,7 @@
#include <Interpreters/inplaceBlockConversions.h>
#include <Interpreters/InterpreterSelectWithUnionQuery.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
#include <Storages/StorageView.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTColumnDeclaration.h>
@ -1316,6 +1317,8 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
            throw Exception(ErrorCodes::BAD_ARGUMENTS,
                            "Data type have to be specified for column {} to add", backQuote(column_name));

        validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef()));

        /// FIXME: Adding a new column of type Object(JSON) is broken.
        /// Looks like there is something around default expression for this column (method `getDefault` is not implemented for the data type Object).
        /// But after ALTER TABLE ADD COLUMN we need to fill existing rows with something (exactly the default value).
@ -1395,6 +1398,8 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
        /// So we don't allow to do it for now.
        if (command.data_type)
        {
            validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef()));

            const GetColumnsOptions options(GetColumnsOptions::All);
            const auto old_data_type = all_columns.getColumn(options, column_name).type;

Some files were not shown because too many files have changed in this diff.