Merge branch 'master' into randomize-trace_profile_events

Alexey Milovidov 2024-08-07 22:24:20 +02:00
commit 92d5d5e8dc
1165 changed files with 9385 additions and 4555 deletions

View File

@@ -260,13 +260,18 @@ jobs:
- name: Finish label
if: ${{ !failure() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
cd "$GITHUB_WORKSPACE/tests/ci"
# update mergeable check
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
python3 merge_pr.py --set-ci-status
# update overall ci report
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
python3 merge_pr.py
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'

View File

@@ -64,6 +64,7 @@ jobs:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'

View File

@@ -103,9 +103,14 @@ jobs:
- name: Check and set merge status
if: ${{ needs.StyleCheck.result == 'success' }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
python3 merge_pr.py --set-ci-status
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'

View File

@@ -52,6 +52,7 @@ jobs:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'

View File

@@ -170,7 +170,11 @@ jobs:
if: ${{ needs.StyleCheck.result == 'success' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
python3 merge_pr.py --set-ci-status
- name: Check Workflow results
uses: ./.github/actions/check_workflow
with:

View File

@@ -481,12 +481,10 @@ jobs:
- name: Finish label
if: ${{ !failure() }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
# update mergeable check
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
# update overall ci report
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'

.gitmodules vendored (2 changes)
View File

@@ -341,7 +341,7 @@
url = https://github.com/graphitemaster/incbin.git
[submodule "contrib/usearch"]
path = contrib/usearch
url = https://github.com/unum-cloud/usearch.git
url = https://github.com/ClickHouse/usearch.git
[submodule "contrib/SimSIMD"]
path = contrib/SimSIMD
url = https://github.com/ashvardanian/SimSIMD.git

View File

@@ -5,12 +5,6 @@ rules:
indentation:
level: warning
indent-sequences: consistent
line-length:
# there are:
# - bash -c "", so this is OK
# - yaml in tests
max: 1000
level: warning
comments:
min-spaces-from-content: 1
document-start: disable

View File

@@ -64,6 +64,7 @@
* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can re-enable it or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
* Change how deduplication for Materialized Views works. This fixes a number of cases: on the destination table, data split into two or more blocks was considered a duplicate when those blocks were inserted in parallel; on the MV destination table, equal blocks were deduplicated, which happens when an MV produces equal results for different input data due to aggregation; on the MV destination table, equal blocks coming from different MVs were deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
* Support reading partitioned DeltaLake data. Infer the DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
* In composable protocols, the TLS layer previously accepted only the `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
* Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).

View File

@@ -21,6 +21,7 @@
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
#include "Poco/Message.h"
namespace Poco
@@ -78,6 +79,10 @@ public:
///
/// The default implementation just breaks into the debugger.
virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
/// Write a message to the log
/// Useful for logging from Poco
static void handle(const Exception & exc);
/// Invokes the currently registered ErrorHandler.
@@ -87,6 +92,9 @@ public:
static void handle();
/// Invokes the currently registered ErrorHandler.
static void logMessage(Message::Priority priority, const std::string & msg);
/// Invokes the currently registered ErrorHandler to log a message.
static ErrorHandler * set(ErrorHandler * pHandler);
/// Registers the given handler as the current error handler.
///

View File

@@ -19,6 +19,7 @@
#include "Poco/Foundation.h"
#include <Poco/Types.h>
namespace Poco
@@ -135,6 +136,12 @@ public:
static const UUID & x500();
/// Returns the namespace identifier for the X500 namespace.
UInt32 getTimeLow() const { return _timeLow; }
UInt16 getTimeMid() const { return _timeMid; }
UInt16 getTimeHiAndVersion() const { return _timeHiAndVersion; }
UInt16 getClockSeq() const { return _clockSeq; }
std::array<UInt8, 6> getNode() const { return std::array<UInt8, 6>{_node[0], _node[1], _node[2], _node[3], _node[4], _node[5]}; }
protected:
UUID(UInt32 timeLow, UInt32 timeMid, UInt32 timeHiAndVersion, UInt16 clockSeq, UInt8 node[]);
UUID(const char * bytes, Version version);
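
The new accessors expose the raw RFC 4122 fields of a UUID. A minimal sketch (not part of the commit) of how a caller might use them to inspect a time-based, version 1 UUID:

#include <Poco/UUID.h>
#include <cstdint>
#include <iostream>

int main()
{
    Poco::UUID uuid("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); // the RFC 4122 DNS namespace UUID, version 1
    // The version is stored in the top 4 bits of timeHiAndVersion.
    std::cout << "version: " << (uuid.getTimeHiAndVersion() >> 12) << '\n';
    // Reassemble the 60-bit timestamp: 100 ns intervals since 1582-10-15.
    const uint64_t timestamp = (static_cast<uint64_t>(uuid.getTimeHiAndVersion() & 0x0FFF) << 48)
        | (static_cast<uint64_t>(uuid.getTimeMid()) << 32)
        | uuid.getTimeLow();
    std::cout << "timestamp: " << timestamp << '\n';
    return 0;
}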

View File

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
// SPDX-License-Identifier: BSL-1.0
//
@@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()
void ErrorHandler::exception(const Exception& exc)
{
poco_debugger_msg(exc.what());
poco_debugger_msg(exc.what());
}
void ErrorHandler::exception(const std::exception& exc)
{
poco_debugger_msg(exc.what());
poco_debugger_msg(exc.what());
}
void ErrorHandler::exception()
{
poco_debugger_msg("unknown exception");
poco_debugger_msg("unknown exception");
}
void ErrorHandler::handle(const Exception& exc)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
}
void ErrorHandler::handle(const std::exception& exc)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
}
void ErrorHandler::handle()
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception();
}
catch (...)
{
}
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception();
}
catch (...)
{
}
}
void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->logMessageImpl(priority, msg);
}
catch (...)
{
}
}
ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
{
poco_check_ptr(pHandler);
poco_check_ptr(pHandler);
FastMutex::ScopedLock lock(_mutex);
ErrorHandler* pOld = _pHandler;
_pHandler = pHandler;
return pOld;
FastMutex::ScopedLock lock(_mutex);
ErrorHandler* pOld = _pHandler;
_pHandler = pHandler;
return pOld;
}
ErrorHandler* ErrorHandler::defaultHandler()
{
// NOTE: Since this is called to initialize the static _pHandler
// variable, sh has to be a local static, otherwise we run
// into static initialization order issues.
static SingletonHolder<ErrorHandler> sh;
return sh.get();
// NOTE: Since this is called to initialize the static _pHandler
// variable, sh has to be a local static, otherwise we run
// into static initialization order issues.
static SingletonHolder<ErrorHandler> sh;
return sh.get();
}
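
The new `logMessage` entry point dispatches, under the handler mutex, to the virtual `logMessageImpl`, so Poco-internal code (such as the TCPServer changes below) can reach the application's logger. A hedged sketch, not part of the commit, of how an application might hook it:

#include <Poco/ErrorHandler.h>
#include <Poco/Message.h>
#include <iostream>
#include <string>

class LoggingErrorHandler : public Poco::ErrorHandler
{
public:
    void logMessageImpl(Poco::Message::Priority priority, const std::string & msg) override
    {
        // Forward to the application's log; std::cerr stands in for a real logger here.
        std::cerr << "[poco:" << static_cast<int>(priority) << "] " << msg << '\n';
    }
};

int main()
{
    LoggingErrorHandler handler;
    Poco::ErrorHandler * previous = Poco::ErrorHandler::set(&handler);
    Poco::ErrorHandler::logMessage(Poco::Message::PRIO_WARNING, "demo message");
    Poco::ErrorHandler::set(previous); // restore before `handler` goes out of scope
    return 0;
}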

View File

@@ -76,13 +76,13 @@ std::string Binary::toString(int indent) const
UUID Binary::uuid() const
{
if (_subtype == 0x04 && _buffer.size() == 16)
if ((_subtype == 0x04 || _subtype == 0x03) && _buffer.size() == 16)
{
UUID uuid;
uuid.copyFrom((const char*) _buffer.begin());
return uuid;
}
throw BadCastException("Invalid subtype");
throw BadCastException("Invalid subtype: " + std::to_string(_subtype) + ", size: " + std::to_string(_buffer.size()));
}

View File

@@ -17,6 +17,7 @@
#include "Poco/Net/StreamSocketImpl.h"
#include "Poco/NumberFormatter.h"
#include "Poco/Timestamp.h"
#include "Poco/ErrorHandler.h"
#include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>

View File

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
// SPDX-License-Identifier: BSL-1.0
//
@@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
_socket(ServerSocket(portNumber)),
_thread(threadName(_socket)),
_stopped(true)
{
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
_socket(ServerSocket(portNumber)),
_thread(threadName(_socket)),
_stopped(true)
{
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
}
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
_socket(socket),
_thread(threadName(socket)),
_stopped(true)
_socket(socket),
_thread(threadName(socket)),
_stopped(true)
{
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
}
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
_socket(socket),
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
_thread(threadName(socket)),
_stopped(true)
_socket(socket),
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
_thread(threadName(socket)),
_stopped(true)
{
}
TCPServer::~TCPServer()
{
try
{
stop();
_pDispatcher->release();
}
catch (...)
{
poco_unexpected();
}
try
{
stop();
_pDispatcher->release();
}
catch (...)
{
poco_unexpected();
}
}
const TCPServerParams& TCPServer::params() const
{
return _pDispatcher->params();
return _pDispatcher->params();
}
void TCPServer::start()
{
poco_assert (_stopped);
poco_assert (_stopped);
_stopped = false;
_thread.start(*this);
_stopped = false;
_thread.start(*this);
}
void TCPServer::stop()
{
if (!_stopped)
{
_stopped = true;
_thread.join();
_pDispatcher->stop();
}
if (!_stopped)
{
_stopped = true;
_thread.join();
_pDispatcher->stop();
}
}
void TCPServer::run()
{
while (!_stopped)
{
Poco::Timespan timeout(250000);
try
{
if (_socket.poll(timeout, Socket::SELECT_READ))
{
try
{
StreamSocket ss = _socket.acceptConnection();
if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
{
// enable nodelay per default: OSX really needs that
while (!_stopped)
{
Poco::Timespan timeout(250000);
try
{
if (_socket.poll(timeout, Socket::SELECT_READ))
{
try
{
StreamSocket ss = _socket.acceptConnection();
if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
{
// enable nodelay per default: OSX really needs that
#if defined(POCO_OS_FAMILY_UNIX)
if (ss.address().family() != AddressFamily::UNIX_LOCAL)
if (ss.address().family() != AddressFamily::UNIX_LOCAL)
#endif
{
ss.setNoDelay(true);
}
_pDispatcher->enqueue(ss);
}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
}
catch (std::exception& exc)
{
ErrorHandler::handle(exc);
}
catch (...)
{
ErrorHandler::handle();
}
}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
// possibly a resource issue since poll() failed;
// give some time to recover before trying again
Poco::Thread::sleep(50);
}
}
{
ss.setNoDelay(true);
}
_pDispatcher->enqueue(ss);
}
else
{
ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
}
catch (std::exception& exc)
{
ErrorHandler::handle(exc);
}
catch (...)
{
ErrorHandler::handle();
}
}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
// possibly a resource issue since poll() failed;
// give some time to recover before trying again
Poco::Thread::sleep(50);
}
}
}
int TCPServer::currentThreads() const
{
return _pDispatcher->currentThreads();
return _pDispatcher->currentThreads();
}
int TCPServer::maxThreads() const
{
return _pDispatcher->maxThreads();
return _pDispatcher->maxThreads();
}
int TCPServer::totalConnections() const
{
return _pDispatcher->totalConnections();
return _pDispatcher->totalConnections();
}
int TCPServer::currentConnections() const
{
return _pDispatcher->currentConnections();
return _pDispatcher->currentConnections();
}
int TCPServer::maxConcurrentConnections() const
{
return _pDispatcher->maxConcurrentConnections();
return _pDispatcher->maxConcurrentConnections();
}
int TCPServer::queuedConnections() const
{
return _pDispatcher->queuedConnections();
return _pDispatcher->queuedConnections();
}
int TCPServer::refusedConnections() const
{
return _pDispatcher->refusedConnections();
return _pDispatcher->refusedConnections();
}
void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
{
poco_assert (_stopped);
poco_assert (_stopped);
_pConnectionFilter = pConnectionFilter;
_pConnectionFilter = pConnectionFilter;
}
std::string TCPServer::threadName(const ServerSocket& socket)
{
std::string name("TCPServer: ");
name.append(socket.address().toString());
return name;
std::string name("TCPServer: ");
name.append(socket.address().toString());
return name;
}
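
The rewritten accept loop now reports rejected connections through `ErrorHandler::logMessage` instead of dropping them silently. A hedged sketch (not from the commit) of a filter whose rejections would land in that new `else` branch; a server installs it with `setConnectionFilter(new LoopbackOnlyFilter)` before `start()`:

#include <Poco/Net/TCPServer.h>
#include <Poco/Net/StreamSocket.h>

class LoopbackOnlyFilter : public Poco::Net::TCPServerConnectionFilter
{
public:
    bool accept(const Poco::Net::StreamSocket & socket) override
    {
        // Returning false makes TCPServer::run() skip the connection and log
        // "Filtered out connection from <peer>" at warning priority.
        return socket.peerAddress().host().isLoopback();
    }
};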

View File

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
// SPDX-License-Identifier: BSL-1.0
//
@@ -33,44 +33,44 @@ namespace Net {
class TCPConnectionNotification: public Notification
{
public:
TCPConnectionNotification(const StreamSocket& socket):
_socket(socket)
{
}
~TCPConnectionNotification()
{
}
const StreamSocket& socket() const
{
return _socket;
}
TCPConnectionNotification(const StreamSocket& socket):
_socket(socket)
{
}
~TCPConnectionNotification()
{
}
const StreamSocket& socket() const
{
return _socket;
}
private:
StreamSocket _socket;
StreamSocket _socket;
};
TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
_rc(1),
_pParams(pParams),
_currentThreads(0),
_totalConnections(0),
_currentConnections(0),
_maxConcurrentConnections(0),
_refusedConnections(0),
_stopped(false),
_pConnectionFactory(pFactory),
_threadPool(threadPool)
_rc(1),
_pParams(pParams),
_currentThreads(0),
_totalConnections(0),
_currentConnections(0),
_maxConcurrentConnections(0),
_refusedConnections(0),
_stopped(false),
_pConnectionFactory(pFactory),
_threadPool(threadPool)
{
poco_check_ptr (pFactory);
poco_check_ptr (pFactory);
if (!_pParams)
_pParams = new TCPServerParams;
if (_pParams->getMaxThreads() == 0)
_pParams->setMaxThreads(threadPool.capacity());
if (!_pParams)
_pParams = new TCPServerParams;
if (_pParams->getMaxThreads() == 0)
_pParams->setMaxThreads(threadPool.capacity());
}
@@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()
void TCPServerDispatcher::duplicate()
{
++_rc;
++_rc;
}
void TCPServerDispatcher::release()
{
if (--_rc == 0) delete this;
if (--_rc == 0) delete this;
}
void TCPServerDispatcher::run()
{
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive
int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();
int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();
for (;;)
{
try
{
AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
if (pNf && !_stopped)
{
TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
if (pCNf)
{
beginConnection();
if (!_stopped)
{
std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
poco_check_ptr(pConnection.get());
pConnection->start();
}
/// endConnection() should be called after destroying TCPServerConnection,
/// otherwise currentConnections() could become zero while some connections are yet still alive.
endConnection();
}
}
}
catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
catch (std::exception &exc) { ErrorHandler::handle(exc); }
catch (...) { ErrorHandler::handle(); }
FastMutex::ScopedLock lock(_mutex);
if (_stopped || (_currentThreads > 1 && _queue.empty()))
{
--_currentThreads;
break;
}
}
for (;;)
{
try
{
AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
if (pNf && !_stopped)
{
TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
if (pCNf)
{
beginConnection();
if (!_stopped)
{
std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
poco_check_ptr(pConnection.get());
pConnection->start();
}
/// endConnection() should be called after destroying TCPServerConnection,
/// otherwise currentConnections() could become zero while some connections are yet still alive.
endConnection();
}
}
}
catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
catch (std::exception &exc) { ErrorHandler::handle(exc); }
catch (...) { ErrorHandler::handle(); }
FastMutex::ScopedLock lock(_mutex);
if (_stopped || (_currentThreads > 1 && _queue.empty()))
{
--_currentThreads;
break;
}
}
}
namespace
{
static const std::string threadName("TCPServerConnection");
static const std::string threadName("TCPServerConnection");
}
void TCPServerDispatcher::enqueue(const StreamSocket& socket)
{
FastMutex::ScopedLock lock(_mutex);
FastMutex::ScopedLock lock(_mutex);
if (_queue.size() < _pParams->getMaxQueued())
{
if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
{
try
{
ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
", current threads: " + std::to_string(_currentThreads) +
", threads in pool: " + std::to_string(_threadPool.allocated()) +
", current connections: " + std::to_string(_currentConnections));
if (_queue.size() < _pParams->getMaxQueued())
{
/// NOTE: the condition below is wrong.
/// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
/// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
/// As a result, queue is useless and connections never wait in queue.
/// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
/// and the connection gets rejected.
/// We could check _currentThreads < _threadPool.allocated() to make it work,
/// but it's not clear if we want to make it work
/// because it may be better to reject the connection immediately if we don't have the resources to handle it.
if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
{
try
{
this->duplicate();
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
++_currentThreads;
}
catch (Poco::Exception& exc)
{
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
++_currentThreads;
}
catch (Poco::Exception& exc)
{
ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
socket.peerAddress().toString());
ErrorHandler::handle(exc);
this->release();
++_refusedConnections;
std::cerr << "Got exception while starting thread for connection. Error code: "
<< exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
return;
}
}
_queue.enqueueNotification(new TCPConnectionNotification(socket));
}
else
{
++_refusedConnections;
}
++_refusedConnections;
return;
}
}
else if (!_queue.hasIdleThreads())
{
ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
}
_queue.enqueueNotification(new TCPConnectionNotification(socket));
}
else
{
ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
++_refusedConnections;
}
}
void TCPServerDispatcher::stop()
{
_stopped = true;
_queue.clear();
_queue.wakeUpAll();
_stopped = true;
_queue.clear();
_queue.wakeUpAll();
}
int TCPServerDispatcher::currentThreads() const
{
return _currentThreads;
return _currentThreads;
}
int TCPServerDispatcher::maxThreads() const
{
FastMutex::ScopedLock lock(_mutex);
return _threadPool.capacity();
FastMutex::ScopedLock lock(_mutex);
return _threadPool.capacity();
}
int TCPServerDispatcher::totalConnections() const
{
return _totalConnections;
return _totalConnections;
}
int TCPServerDispatcher::currentConnections() const
{
return _currentConnections;
return _currentConnections;
}
int TCPServerDispatcher::maxConcurrentConnections() const
{
return _maxConcurrentConnections;
return _maxConcurrentConnections;
}
int TCPServerDispatcher::queuedConnections() const
{
return _queue.size();
return _queue.size();
}
int TCPServerDispatcher::refusedConnections() const
{
return _refusedConnections;
return _refusedConnections;
}
void TCPServerDispatcher::beginConnection()
{
FastMutex::ScopedLock lock(_mutex);
FastMutex::ScopedLock lock(_mutex);
++_totalConnections;
++_currentConnections;
if (_currentConnections > _maxConcurrentConnections)
_maxConcurrentConnections.store(_currentConnections);
++_totalConnections;
++_currentConnections;
if (_currentConnections > _maxConcurrentConnections)
_maxConcurrentConnections.store(_currentConnections);
}
void TCPServerDispatcher::endConnection()
{
--_currentConnections;
--_currentConnections;
}
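
The NOTE above describes a real accounting flaw: each dispatcher compares its own `_currentThreads` against `getMaxThreads()`, but the thread pool is shared, so a saturated pool still passes the per-dispatcher check, the thread start throws, and the connection is refused rather than queued. A toy, self-contained simulation of that arithmetic (hypothetical names, not Poco code):

#include <iostream>
#include <stdexcept>

struct SharedPool
{
    int allocated = 0;
    const int capacity = 16;
    void start() // models ThreadPool::startWithPriority()
    {
        if (allocated >= capacity)
            throw std::runtime_error("no thread available");
        ++allocated;
    }
};

int main()
{
    SharedPool pool;
    const int maxThreads = 16; // per-dispatcher limit, defaulted to the pool capacity
    int threadsA = 0;
    int threadsB = 0;
    while (threadsA < maxThreads) { pool.start(); ++threadsA; } // dispatcher A saturates the pool
    // Dispatcher B still sees threadsB (0) < maxThreads (16), so it tries to start
    // a thread instead of queueing the connection -- and the start fails.
    if (threadsB < maxThreads)
    {
        try { pool.start(); ++threadsB; }
        catch (const std::exception & e) { std::cout << "connection refused: " << e.what() << '\n'; }
    }
    // Comparing against pool.allocated, as the NOTE suggests, would have queued the
    // connection instead of refusing it.
    return 0;
}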

View File

@@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng)
add_contrib (bzip2-cmake bzip2)
add_contrib (minizip-ng-cmake minizip-ng)
add_contrib (snappy-cmake snappy)
add_contrib (rocksdb-cmake rocksdb)
add_contrib (thrift-cmake thrift)
# parquet/arrow/orc
add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion
@@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (idna-cmake idna)

contrib/NuRaft vendored (2 changes)

@@ -1 +1 @@
Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
Subproject commit c2b0811f164a7948208489562dab4f186eb305ce

View File

@@ -4,9 +4,7 @@ else ()
option(ENABLE_ICU "Enable ICU" 0)
endif ()
# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they generated
# the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255
if (NOT ENABLE_ICU OR ARCH_S390X)
if (NOT ENABLE_ICU)
message(STATUS "Not using ICU")
return()
endif()

contrib/icudata vendored (2 changes)

@@ -1 +1 @@
Subproject commit d345d6ac22f381c882420de9053d30ae1ff38d75
Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b

contrib/qpl vendored (2 changes)

@@ -1 +1 @@
Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8

View File

@@ -4,7 +4,6 @@ set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
set (EFFICIENT_WAIT OFF)
set (BLOCK_ON_FAULT ON)
set (LOG_HW_INIT OFF)
set (SANITIZE_MEMORY OFF)
set (SANITIZE_THREADS OFF)
@@ -16,16 +15,20 @@ function(GetLibraryVersion _content _outputVar)
SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
endfunction()
set (QPL_VERSION 1.2.0)
set (QPL_VERSION 1.6.0)
message(STATUS "Intel QPL version: ${QPL_VERSION}")
# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api.
# Generate 8 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, core_iaa, middle_layer_lib.
# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal.
# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm,
# which are then combined into static or shared qpl.
# Output ch_contrib::qpl by linking with 8 library targets.
# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
# only upstream isal (ch_contrib::isal) but at this point we can't.
# Note, QPL has integrated a customized version of ISA-L to meet specific needs.
# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version
# or upgrade the current copy.
## cmake/CompileOptions.cmake and automatic wrappers generation
# ==========================================================================
# Copyright (C) 2022 Intel Corporation
@@ -442,6 +445,7 @@ function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
endforeach()
endfunction()
# [SUBDIR]isal
enable_language(ASM_NASM)
@@ -479,7 +483,6 @@ set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm
${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm
${QPL_SRC_DIR}/isal/igzip/stdmac.asm
${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm
@@ -505,7 +508,6 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
# Setting external and internal interfaces for ISA-L library
target_include_directories(isal
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/isal/include>
PRIVATE ${QPL_SRC_DIR}/isal/include
PUBLIC ${QPL_SRC_DIR}/isal/igzip)
set_target_properties(isal PROPERTIES
@@ -617,12 +619,9 @@ target_compile_options(qplcore_sw_dispatcher
# [SUBDIR]core-iaa
file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c
${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c
${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c)
${QPL_SRC_DIR}/core-iaa/sources/*.c)
# Create library
add_library(core_iaa OBJECT ${HW_PATH_SRC})
@@ -634,31 +633,27 @@ target_include_directories(core_iaa
PRIVATE ${UUID_DIR}
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES> # for own_checkers.h
PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)
target_compile_features(core_iaa PRIVATE c_std_11)
target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
PRIVATE $<$<BOOL:${BLOCK_ON_FAULT}>: BLOCK_ON_FAULT_ENABLED>
PRIVATE $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)
# [SUBDIR]middle-layer
file(GLOB MIDDLE_LAYER_SRC
${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp
${QPL_SRC_DIR}/middle-layer/checksum/*.cpp
${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp
${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
${QPL_SRC_DIR}/middle-layer/common/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp
${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp
${QPL_SRC_DIR}/middle-layer/other/*.cpp
${QPL_SRC_DIR}/middle-layer/util/*.cpp
${QPL_SRC_DIR}/middle-layer/inflate/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo
${QPL_SRC_DIR}/middle-layer/util/*.cpp)
add_library(middle_layer_lib OBJECT
${MIDDLE_LAYER_SRC})
@@ -667,6 +662,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
$<TARGET_OBJECTS:middle_layer_lib>)
target_compile_options(middle_layer_lib
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
target_compile_definitions(middle_layer_lib
@@ -682,6 +678,7 @@ target_include_directories(middle_layer_lib
PRIVATE ${UUID_DIR}
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/middle-layer>
PUBLIC $<TARGET_PROPERTY:_qpl,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:core_iaa,INTERFACE_INCLUDE_DIRECTORIES>)
@@ -689,31 +686,54 @@ target_include_directories(middle_layer_lib
target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB)
# [SUBDIR]c_api
file(GLOB_RECURSE QPL_C_API_SRC
${QPL_SRC_DIR}/c_api/*.c
${QPL_SRC_DIR}/c_api/*.cpp)
file(GLOB QPL_C_API_SRC
${QPL_SRC_DIR}/c_api/compression_operations/*.c
${QPL_SRC_DIR}/c_api/compression_operations/*.cpp
${QPL_SRC_DIR}/c_api/filter_operations/*.cpp
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp
${QPL_SRC_DIR}/c_api/other_operations/*.cpp
${QPL_SRC_DIR}/c_api/serialization/*.cpp
${QPL_SRC_DIR}/c_api/*.cpp)
add_library(qpl_c_api OBJECT ${QPL_C_API_SRC})
target_include_directories(qpl_c_api
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api/>
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/include/> $<INSTALL_INTERFACE:include>
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(qpl_c_api PROPERTIES
$<$<C_COMPILER_ID:GNU,Clang>:C_STANDARD 17
CXX_STANDARD 17)
target_compile_options(qpl_c_api
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
target_compile_definitions(qpl_c_api
PUBLIC -DQPL_BADARG_CHECK # own_checkers.h
PUBLIC -DQPL_LIB # needed for middle_layer_lib
PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>) # needed for middle_layer_lib
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
$<TARGET_OBJECTS:qpl_c_api>)
# Final _qpl target
get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS)
add_library(_qpl STATIC ${QPL_C_API_SRC} ${LIB_DEPS})
add_library(_qpl STATIC ${LIB_DEPS})
target_include_directories(_qpl
PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)
PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>)
target_compile_options(_qpl
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
target_compile_definitions(_qpl
PRIVATE -DQPL_LIB
PRIVATE -DQPL_BADARG_CHECK
PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>
PUBLIC -DENABLE_QPL_COMPRESSION)
target_link_libraries(_qpl
PRIVATE ch_contrib::accel-config
PRIVATE ch_contrib::isal)
PRIVATE ch_contrib::accel-config)
target_include_directories(_qpl SYSTEM BEFORE
PUBLIC "${QPL_PROJECT_DIR}/include"

contrib/rocksdb vendored (2 changes)

@@ -1 +1 @@
Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e
Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24

View File

@@ -1,51 +1,46 @@
option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES})
if (NOT ENABLE_ROCKSDB)
if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL
message (STATUS "Not using RocksDB")
return()
endif()
# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default
set (CMAKE_CXX_STANDARD 20)
# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
option(WITH_JEMALLOC "build with JeMalloc" OFF)
option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
option(WITH_SNAPPY "build with SNAPPY" ON)
option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)
if(WITH_SNAPPY)
if (ENABLE_JEMALLOC)
add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
endif ()
if (ENABLE_LIBURING)
add_definitions(-DROCKSDB_IOURING_PRESENT)
list (APPEND THIRDPARTY_LIBS ch_contrib::liburing)
endif ()
if (WITH_SNAPPY)
add_definitions(-DSNAPPY)
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
endif()
if(WITH_ZLIB)
if (WITH_ZLIB)
add_definitions(-DZLIB)
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
endif()
if(WITH_LZ4)
if (WITH_LZ4)
add_definitions(-DLZ4)
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
endif()
if(WITH_ZSTD)
if (WITH_ZSTD)
add_definitions(-DZSTD)
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()
add_definitions(-DROCKSDB_PORTABLE)
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
add_definitions(-DHAVE_SSE42)
add_definitions(-DHAVE_PCLMUL)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
set (HAS_ARMV8_CRC 1)
# the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
@@ -91,8 +86,11 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/secondary_cache_adapter.cc
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
@@ -109,6 +107,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
@@ -129,6 +128,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
@@ -174,9 +174,11 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns_helper.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
${ROCKSDB_SOURCE_DIR}/db/write_stall_stats.cc
${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
${ROCKSDB_SOURCE_DIR}/env/env.cc
@@ -184,6 +186,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
@@ -229,6 +232,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/offpeak_time_info.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
@@ -268,6 +272,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/table/get_context.cc
${ROCKSDB_SOURCE_DIR}/table/iterator.cc
${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/compaction_merging_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
@@ -309,6 +314,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
${ROCKSDB_SOURCE_DIR}/util/data_structure.cc
${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
${ROCKSDB_SOURCE_DIR}/util/hash.cc
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
@@ -322,6 +328,8 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
${ROCKSDB_SOURCE_DIR}/util/udt_util.cc
${ROCKSDB_SOURCE_DIR}/util/write_batch_util.cc
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc
@@ -366,6 +374,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
@@ -386,6 +395,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
@@ -404,12 +414,6 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
build_version.cc) # generated by hand
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
set_source_files_properties(
"${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
list(APPEND SOURCES
"${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c"
@@ -422,14 +426,18 @@ if(HAS_ARMV8_CRC)
endif(HAS_ARMV8_CRC)
list(APPEND SOURCES
"${ROCKSDB_SOURCE_DIR}/port/port_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
${ROCKSDB_SOURCE_DIR}/port/port_posix.cc
${ROCKSDB_SOURCE_DIR}/env/env_posix.cc
${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc
${ROCKSDB_SOURCE_DIR}/env/io_posix.cc)
add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
# Not in the native build system but useful anyway:
# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers
target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL)
# SYSTEM is required to overcome some issues
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")

contrib/usearch vendored (2 changes)

@@ -1 +1 @@
Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f
Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356

contrib/zlib-ng vendored (2 changes)

@@ -1 +1 @@
Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c
Subproject commit a2fbeffdc30a8b0ce6d54ee31208e2688eac4c9f

View File

@@ -14,6 +14,8 @@ add_definitions(-DHAVE_VISIBILITY_HIDDEN)
add_definitions(-DHAVE_VISIBILITY_INTERNAL)
add_definitions(-DHAVE_BUILTIN_CTZ)
add_definitions(-DHAVE_BUILTIN_CTZLL)
add_definitions(-DHAVE_ATTRIBUTE_ALIGNED)
add_definitions(-DHAVE_POSIX_MEMALIGN)
set(ZLIB_ARCH_SRCS)
set(ZLIB_ARCH_HDRS)
@@ -24,67 +26,74 @@ if(ARCH_AARCH64)
set(ARCHDIR "${SOURCE_DIR}/arch/arm")
add_definitions(-DARM_FEATURES)
add_definitions(-DHAVE_SYS_AUXV_H)
add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP)
add_definitions(-DARM_AUXV_HAS_NEON)
add_definitions(-DARM_ACLE_CRC_HASH)
add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_CHUNKSET -DARM_NEON_SLIDEHASH)
add_definitions(-DARM_ACLE)
add_definitions(-DHAVE_ARM_ACLE_H)
add_definitions(-DARM_NEON)
add_definitions(-DARM_NEON_HASLD4)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/armfeature.c)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/arm_features.c)
set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS})
set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c ${ARCHDIR}/slide_neon.c)
set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c
${ARCHDIR}/compare256_neon.c ${ARCHDIR}/slide_hash_neon.c)
list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS})
elseif(ARCH_PPC64LE)
set(ARCHDIR "${SOURCE_DIR}/arch/power")
add_definitions(-DPOWER8)
add_definitions(-DPOWER_FEATURES)
add_definitions(-DPOWER8_VSX_ADLER32)
add_definitions(-DPOWER8_VSX_SLIDEHASH)
add_definitions(-DHAVE_SYS_AUXV_H)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c)
set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c)
if(POWER9)
add_definitions(-DPOWER9)
else()
add_definitions(-DPOWER8)
add_definitions(-DPOWER8_VSX)
add_definitions(-DPOWER8_VSX_CRC32)
endif()
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power_features.c)
set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/chunkset_power8.c ${ARCHDIR}/slide_hash_power8.c)
list(APPEND POWER8_SRCS ${ARCHDIR}/crc32_power8.c)
list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS})
elseif(ARCH_AMD64)
set(ARCHDIR "${SOURCE_DIR}/arch/x86")
add_definitions(-DX86_FEATURES)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86.c)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86_features.c)
if(ENABLE_AVX2)
add_definitions(-DX86_AVX2 -DX86_AVX2_ADLER32 -DX86_AVX_CHUNKSET)
set(AVX2_SRCS ${ARCHDIR}/slide_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/compare258_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c)
add_definitions(-DX86_AVX2)
set(AVX2_SRCS ${ARCHDIR}/slide_hash_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/compare256_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx2.c)
list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS})
endif()
if(ENABLE_SSE42)
add_definitions(-DX86_SSE42_CRC_HASH)
set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
add_definitions(-DX86_SSE42_CRC_INTRIN)
add_definitions(-DX86_SSE42_CMP_STR)
set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
add_definitions(-DX86_SSE42)
set(SSE42_SRCS ${ARCHDIR}/adler32_sse42.c ${ARCHDIR}/insert_string_sse42.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
endif()
if(ENABLE_SSSE3)
add_definitions(-DX86_SSSE3 -DX86_SSSE3_ADLER32)
set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c)
add_definitions(-DX86_SSSE3)
set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c ${ARCHDIR}/chunkset_ssse3.c)
list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS})
endif()
if(ENABLE_PCLMULQDQ)
add_definitions(-DX86_PCLMULQDQ_CRC)
set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_pclmulqdq.c)
list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
endif()
add_definitions(-DX86_SSE2 -DX86_SSE2_CHUNKSET -DX86_SSE2_SLIDEHASH)
set(SSE2_SRCS ${ARCHDIR}/chunkset_sse.c ${ARCHDIR}/slide_sse.c)
add_definitions(-DX86_SSE2)
set(SSE2_SRCS ${ARCHDIR}/chunkset_sse2.c ${ARCHDIR}/compare256_sse2.c ${ARCHDIR}/slide_hash_sse2.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS})
add_definitions(-DX86_NOCHECK_SSE2)
endif ()
@@ -106,39 +115,45 @@ generate_cmakein(${SOURCE_DIR}/zconf.h.in ${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cm
set(ZLIB_SRCS
${SOURCE_DIR}/adler32.c
${SOURCE_DIR}/adler32_fold.c
${SOURCE_DIR}/chunkset.c
${SOURCE_DIR}/compare258.c
${SOURCE_DIR}/compare256.c
${SOURCE_DIR}/compress.c
${SOURCE_DIR}/crc32.c
${SOURCE_DIR}/crc32_comb.c
${SOURCE_DIR}/cpu_features.c
${SOURCE_DIR}/crc32_braid.c
${SOURCE_DIR}/crc32_braid_comb.c
${SOURCE_DIR}/crc32_fold.c
${SOURCE_DIR}/deflate.c
${SOURCE_DIR}/deflate_fast.c
${SOURCE_DIR}/deflate_huff.c
${SOURCE_DIR}/deflate_medium.c
${SOURCE_DIR}/deflate_quick.c
${SOURCE_DIR}/deflate_rle.c
${SOURCE_DIR}/deflate_slow.c
${SOURCE_DIR}/deflate_stored.c
${SOURCE_DIR}/functable.c
${SOURCE_DIR}/infback.c
${SOURCE_DIR}/inffast.c
${SOURCE_DIR}/inflate.c
${SOURCE_DIR}/inftrees.c
${SOURCE_DIR}/insert_string.c
${SOURCE_DIR}/insert_string_roll.c
${SOURCE_DIR}/slide_hash.c
${SOURCE_DIR}/trees.c
${SOURCE_DIR}/uncompr.c
${SOURCE_DIR}/zutil.c
)
set(ZLIB_GZFILE_SRCS
${SOURCE_DIR}/gzlib.c
${SOURCE_DIR}/gzread.c
${CMAKE_CURRENT_BINARY_DIR}/gzread.c
${SOURCE_DIR}/gzwrite.c
)
set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS})
set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS} ${ZLIB_GZFILE_SRCS})
add_library(_zlib ${ZLIB_ALL_SRCS})
add_library(ch_contrib::zlib ALIAS _zlib)
# https://github.com/zlib-ng/zlib-ng/pull/733
# This is disabled by default
add_compile_definitions(Z_TLS=__thread)
if(HAVE_UNISTD_H)
SET(ZCONF_UNISTD_LINE "#if 1 /* was set to #if 1 by configure/cmake/etc */")
else()
@@ -153,6 +168,9 @@ endif()
set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc)
configure_file(${SOURCE_DIR}/zlib.pc.cmakein ${ZLIB_PC} @ONLY)
configure_file(${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY)
configure_file(${SOURCE_DIR}/zlib.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib.h @ONLY)
configure_file(${SOURCE_DIR}/zlib_name_mangling.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib_name_mangling.h @ONLY)
configure_file(${SOURCE_DIR}/gzread.c.in ${CMAKE_CURRENT_BINARY_DIR}/gzread.c @ONLY)
# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (_zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)

View File

@@ -40,8 +40,6 @@ fi
DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
COORDINATION_DIR="${DATA_DIR}/coordination"
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
@@ -84,7 +82,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi
# There is no config file. Will use embedded one

View File

@@ -28,14 +28,18 @@ RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_
RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'" >> /etc/environment
# Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
# (but w/o verbosity for TSAN, otherwise test.reference will not match)
ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
ENV ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'
# for external_symbolizer_path
# for external_symbolizer_path, and also ensure that llvm-symbolizer really
# exists (since you don't want to fall back to addr2line, it is very slow)
RUN test -f /usr/bin/llvm-symbolizer-${LLVM_VERSION}
RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8

View File

@@ -218,6 +218,6 @@ function stop_logs_replication
clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
tee /dev/stderr
} | {
xargs -n1 -r -i clickhouse-client --query "drop table {}"
timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}"
}
}

View File

@@ -41,7 +41,7 @@ export FASTTEST_WORKSPACE
export FASTTEST_SOURCE
export FASTTEST_BUILD
export FASTTEST_DATA
export FASTTEST_OUT
export FASTTEST_OUTPUT
export PATH
function ccache_status

View File

@@ -28,9 +28,9 @@
</table_function_remote_max_addresses>
<!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
<allow_experimental_analyzer>
<enable_analyzer>
<readonly/>
</allow_experimental_analyzer>
</enable_analyzer>
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>

View File

@@ -193,53 +193,60 @@ function fuzz
kill -0 $server_pid
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
# and clickhouse-server can do fork-exec, for example, to run some bridge.
# Do not set nostop noprint for all signals, because some of them may cause gdb to hang,
# explicitly ignore non-fatal signals that are used by the server.
# Number of SIGRTMIN can be determined only in runtime.
RTMIN=$(kill -l SIGRTMIN)
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb
IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
if [[ "$IS_ASAN" = "1" ]];
then
echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
else
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
# and clickhouse-server can do fork-exec, for example, to run some bridge.
# Do not set nostop noprint for all signals, because some of them may cause gdb to hang,
# explicitly ignore non-fatal signals that are used by the server.
# Number of SIGRTMIN can be determined only in runtime.
RTMIN=$(kill -l SIGRTMIN)
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb
gdb -batch -command script.gdb -p $server_pid &
sleep 5
# gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
gdb -batch -command script.gdb -p $server_pid &
sleep 5
# gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
# Check connectivity after we attach gdb, because it might cause the server
# to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
for _ in {1..180}
do
if clickhouse-client --query "select 1"
then
break
fi
sleep 1
done
kill -0 $server_pid # This checks that it is our server that is started and not some other one
fi
# Check connectivity after we attach gdb, because it might cause the server
# to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
for _ in {1..180}
do
if clickhouse-client --query "select 1"
then
break
fi
sleep 1
done
kill -0 $server_pid # This checks that it is our server that is started and not some other one
echo 'Server started and responded.'
setup_logs_replication
@@ -264,8 +271,13 @@ quit
# The fuzzer_pid belongs to the timeout process.
actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid")
echo "Attaching gdb to the fuzzer itself"
gdb -batch -command script.gdb -p $actual_fuzzer_pid &
if [[ "$IS_ASAN" = "1" ]];
then
echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
else
echo "Attaching gdb to the fuzzer itself"
gdb -batch -command script.gdb -p $actual_fuzzer_pid &
fi
# Wait for the fuzzer to complete.
# Note that the 'wait || ...' thing is required so that the script doesn't

View File

@@ -74,6 +74,7 @@ protobuf==4.25.2
psycopg2-binary==2.9.6
py4j==0.10.9.5
py==1.11.0
pyarrow==17.0.0
pycparser==2.22
pycryptodome==3.20.0
pymongo==3.11.0

View File

@@ -5,43 +5,53 @@ source /utils.lib
function attach_gdb_to_clickhouse()
{
    # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to the watchdog,
    # and clickhouse-server can do fork-exec, for example, to run some bridge.
    # Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang;
    # explicitly ignore the non-fatal signals that are used by the server.
    # The number of SIGRTMIN can be determined only at runtime.
RTMIN=$(kill -l SIGRTMIN)
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb
IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
if [[ "$IS_ASAN" = "1" ]];
then
echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
else
        # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to the watchdog,
        # and clickhouse-server can do fork-exec, for example, to run some bridge.
        # Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang;
        # explicitly ignore the non-fatal signals that are used by the server.
        # The number of SIGRTMIN can be determined only at runtime.
RTMIN=$(kill -l SIGRTMIN)
# shellcheck disable=SC2016
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
info registers
p "top 1 KiB of the stack:"
p/x *(uint64_t[128]*)"'$sp'"
maintenance info sections
thread apply all backtrace full
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb
        # FIXME: the hung-queries check may work incorrectly because of the attached gdb;
        # we cannot attach another gdb to collect stack traces if some queries hang.
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
sleep 5
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'"
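        # run_with_retry comes from the sourced test utils; read as: retry the probe query
        # up to 60 times (the retry budget is an assumption from its use here) while gdb
        # finishes loading debug info and SIGCONTs the server.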
fi
}
# vi: ft=bash
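For orientation, a minimal consumer sketch of this library, using only the names visible in this diff (the install path and function name match the run.sh diff below; everything else is assumed):
#!/bin/bash
# stress_tests.lib is installed at the image root, as the run.sh diff below shows
source /stress_tests.lib
attach_gdb_to_clickhouse   # no-op on ASAN builds, otherwise attaches gdb to the server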

View File

@ -3,6 +3,12 @@
# shellcheck disable=SC1091
source /setup_export_logs.sh
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
# Avoid overlaps with previous runs
dmesg --clear
# fail on errors, verbose and export all env variables
set -e -x -a
@ -72,8 +78,12 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th
remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
fi
export IS_FLAKY_CHECK=0
# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
export IS_FLAKY_CHECK=1
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000
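# A rough reading of these knobs (semantics assumed, not stated in this diff): about
# once per 1000us of thread CPU time, with probability 0.1, inject a sleep of up to
# 100000us, to shake out races during the flaky check.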
@ -164,7 +174,7 @@ do
done
setup_logs_replication
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
attach_gdb_to_clickhouse
function fn_exists() {
declare -F "$1" > /dev/null;
@ -212,6 +222,10 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--shared-catalog')
fi
if [[ "$USE_DISTRIBUTED_CACHE" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--distributed-cache')
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
# Too many tests fail for DatabaseReplicated in parallel.
@ -256,7 +270,7 @@ function run_tests()
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
set -e
DURATION=$((START_TIME - SECONDS))
DURATION=$((SECONDS - START_TIME))
echo "Elapsed ${DURATION} seconds."
if [[ $DURATION -ge $TIMEOUT ]]
@ -295,22 +309,22 @@ stop_logs_replication
failed_to_save_logs=0
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
if ! clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.zst' format TSVWithNamesAndTypes"; then
failed_to_save_logs=1
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
err=$( { clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then
failed_to_save_logs=1
fi
if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
failed_to_save_logs=1
fi
fi
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
failed_to_save_logs=1
fi
fi
done
@ -383,6 +397,8 @@ do
| zstd --threads=0 > "/test_output/trace-log-$trace_type-flamegraph.tsv.zst" ||:
done
# Grep logs for sanitizer asserts, crashes and other critical errors
check_logs_for_critical_errors
# Compressed (FIXME: remove once only GitHub Actions remains)
rm /var/log/clickhouse-server/clickhouse-server.log

View File

@ -139,9 +139,9 @@ EOL
</table_function_remote_max_addresses>
<!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
<allow_experimental_analyzer>
<enable_analyzer>
<readonly/>
</allow_experimental_analyzer>
</enable_analyzer>
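    <!-- A note on the mechanism (assumed from the ClickHouse settings-constraints
         machinery): an empty <readonly/> element pins the setting so queries under
         test cannot change it back. -->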
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>
@ -242,7 +242,7 @@ function check_server_start()
function check_logs_for_critical_errors()
{
# Sanitizer asserts
sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr*.log >> /test_output/tmp
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
&& echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
|| echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
@ -308,7 +308,8 @@ function collect_query_and_trace_logs()
{
for table in query_log trace_log metric_log
do
clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
        # Don't ignore errors here: doing so would hide sanitizer reports produced while running clickhouse-local
clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst
done
}

View File

@ -4,4 +4,5 @@ ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
RUN chmod +x run.sh
ENTRYPOINT ["/run.sh"]

View File

@ -1,5 +1,27 @@
#!/bin/bash
set -x
# Need to keep the exit code from the tests after `tee`; otherwise we don't alert on ASan errors
set -o pipefail
set -e
timeout 40m gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' | tee test_output/test_result.txt
if [ "$#" -ne 1 ]; then
echo "Expected exactly one argument"
exit 1
fi
if [ "$1" = "GDB" ];
then
timeout 40m \
gdb -q -ex "set print inferior-events off" -ex "set confirm off" -ex "set print thread-events off" -ex run -ex bt -ex quit --args \
./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
| tee test_output/test_result.txt
elif [ "$1" = "NO_GDB" ];
then
timeout 40m \
./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
| tee test_output/test_result.txt
else
echo "Unknown argument: $1"
exit 1
fi

View File

@ -5,14 +5,6 @@ FROM ubuntu:22.04
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
# FIXME: rebuild for clang 18.1.3, that contains a workaround [1] for
# sanitizers issue [2]:
#
# $ git tag --contains c2a57034eff048cd36c563c8e0051db3a70991b3 | tail -1
# llvmorg-18.1.3
#
# [1]: https://github.com/llvm/llvm-project/commit/c2a57034eff048cd36c563c8e0051db3a70991b3
# [2]: https://github.com/ClickHouse/ClickHouse/issues/64086
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
RUN apt-get update \

View File

@ -161,11 +161,11 @@ def process_result(result_path, broken_tests):
retries,
test_results,
) = process_test_log(result_path, broken_tests)
is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
logging.info("Is flaky check: %s", is_flacky_check)
is_flaky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
logging.info("Is flaky check: %s", is_flaky_check)
    # If no tests were run (success == 0), it indicates an error (e.g. the server did not start or crashed immediately).
    # But it's OK for "flaky checks" - they can contain just a single test for the check, which may be marked as skipped.
if failed != 0 or unknown != 0 or (success == 0 and (not is_flacky_check)):
if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)):
state = "failure"
if hung:

View File

@ -331,7 +331,7 @@
* Fix several non significant errors in unit tests. [#11262](https://github.com/ClickHouse/ClickHouse/pull/11262) ([alesapin](https://github.com/alesapin)).
* Add a test for Join table engine from @donmikel. This closes [#9158](https://github.com/ClickHouse/ClickHouse/issues/9158). [#11265](https://github.com/ClickHouse/ClickHouse/pull/11265) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Repeat test in CI if `curl` invocation was timed out. It is possible due to system hangups for 10+ seconds that are typical in our CI infrastructure. This fixes [#11267](https://github.com/ClickHouse/ClickHouse/issues/11267). [#11268](https://github.com/ClickHouse/ClickHouse/pull/11268) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix potentially flacky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix potentially flaky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Now clickhouse-test check the server aliveness before tests run. [#11285](https://github.com/ClickHouse/ClickHouse/pull/11285) ([alesapin](https://github.com/alesapin)).
* Emit a warning if server was build in debug or with sanitizers. [#11304](https://github.com/ClickHouse/ClickHouse/pull/11304) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better check for hung queries in clickhouse-test. [#11321](https://github.com/ClickHouse/ClickHouse/pull/11321) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

View File

@ -280,7 +280,7 @@ sidebar_label: 2022
* Cleanup unbundled image [#29689](https://github.com/ClickHouse/ClickHouse/pull/29689) ([Azat Khuzhin](https://github.com/azat)).
* Fix memory tracking for merges and mutations [#29691](https://github.com/ClickHouse/ClickHouse/pull/29691) ([Azat Khuzhin](https://github.com/azat)).
* Fix data-race in WriteIndirectBuffer (used in DiskMemory) [#29692](https://github.com/ClickHouse/ClickHouse/pull/29692) ([Azat Khuzhin](https://github.com/azat)).
* Fix flacky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)).
* BorrowedObjectPool condition variable notify fix [#29722](https://github.com/ClickHouse/ClickHouse/pull/29722) ([Maksim Kita](https://github.com/kitaisreal)).
* Better exception message for local interactive [#29737](https://github.com/ClickHouse/ClickHouse/pull/29737) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix --stage for clickhouse-local [#29745](https://github.com/ClickHouse/ClickHouse/pull/29745) ([Azat Khuzhin](https://github.com/azat)).
@ -308,7 +308,7 @@ sidebar_label: 2022
* Fix client [#29864](https://github.com/ClickHouse/ClickHouse/pull/29864) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove some more streams. [#29898](https://github.com/ClickHouse/ClickHouse/pull/29898) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add logging in ZooKeeper client [#29901](https://github.com/ClickHouse/ClickHouse/pull/29901) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix some flacky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix some flaky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Grep server log even if it contains binary data [#29903](https://github.com/ClickHouse/ClickHouse/pull/29903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Cosmetic refactoring of server constants. [#29913](https://github.com/ClickHouse/ClickHouse/pull/29913) ([Amos Bird](https://github.com/amosbird)).
* Format improvement of AlterQuery [#29916](https://github.com/ClickHouse/ClickHouse/pull/29916) ([flynn](https://github.com/ucasfl)).
@ -465,7 +465,7 @@ sidebar_label: 2022
* Fix docs release [#30933](https://github.com/ClickHouse/ClickHouse/pull/30933) ([alesapin](https://github.com/alesapin)).
* Fix style check [#30937](https://github.com/ClickHouse/ClickHouse/pull/30937) ([alesapin](https://github.com/alesapin)).
* Fix file progress for clickhouse-local [#30938](https://github.com/ClickHouse/ClickHouse/pull/30938) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flacky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix reading from TinyLog [#30941](https://github.com/ClickHouse/ClickHouse/pull/30941) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add github to known hosts in docs release [#30947](https://github.com/ClickHouse/ClickHouse/pull/30947) ([alesapin](https://github.com/alesapin)).
* Parse json from response in ci checks [#30948](https://github.com/ClickHouse/ClickHouse/pull/30948) ([alesapin](https://github.com/alesapin)).

View File

@ -220,7 +220,7 @@ sidebar_label: 2022
* Fix test_backward_compatibility [#30950](https://github.com/ClickHouse/ClickHouse/pull/30950) ([Ilya Yatsishin](https://github.com/qoega)).
* Add stress test to github actions [#30952](https://github.com/ClickHouse/ClickHouse/pull/30952) ([alesapin](https://github.com/alesapin)).
* Try smaller blacklist of non parallel integration tests [#30963](https://github.com/ClickHouse/ClickHouse/pull/30963) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix flacky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Move access-rights source code [#30973](https://github.com/ClickHouse/ClickHouse/pull/30973) ([Vitaly Baranov](https://github.com/vitlibar)).
* Set output_format_avro_rows_in_file default to 1 [#30990](https://github.com/ClickHouse/ClickHouse/pull/30990) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove remaining usages of Y_IGNORE [#30993](https://github.com/ClickHouse/ClickHouse/pull/30993) ([Yuriy Chernyshov](https://github.com/georgthegreat)).
@ -353,7 +353,7 @@ sidebar_label: 2022
* Support toUInt8/toInt8 for if constant condition optimization. [#31866](https://github.com/ClickHouse/ClickHouse/pull/31866) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Added -no-sanitize=unsigned-integer-overflow build flag [#31881](https://github.com/ClickHouse/ClickHouse/pull/31881) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix typos [#31886](https://github.com/ClickHouse/ClickHouse/pull/31886) ([Anton Popov](https://github.com/CurtizJ)).
* Try to fix flacky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try to fix flaky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Reduce the files that depend on parser headers [#31896](https://github.com/ClickHouse/ClickHouse/pull/31896) ([Raúl Marín](https://github.com/Algunenano)).
* Fix magic_enum for debug helpers (fixes build w/ USE_DEBUG_HELPERS) [#31922](https://github.com/ClickHouse/ClickHouse/pull/31922) ([Azat Khuzhin](https://github.com/azat)).
* Remove some trash from build [#31923](https://github.com/ClickHouse/ClickHouse/pull/31923) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -387,7 +387,7 @@ sidebar_label: 2022
* make looping in H3 funcs uniform [#32110](https://github.com/ClickHouse/ClickHouse/pull/32110) ([Bharat Nallan](https://github.com/bharatnc)).
* Remove PVS check from master [#32114](https://github.com/ClickHouse/ClickHouse/pull/32114) ([alesapin](https://github.com/alesapin)).
* Fix flaky keeper whitelist test [#32115](https://github.com/ClickHouse/ClickHouse/pull/32115) ([alesapin](https://github.com/alesapin)).
* Fix flacky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix flaky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in `removePartAndEnqueueFetch(...)` [#32119](https://github.com/ClickHouse/ClickHouse/pull/32119) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Move fuzzers and unit tests to another group [#32120](https://github.com/ClickHouse/ClickHouse/pull/32120) ([alesapin](https://github.com/alesapin)).
* Add a test with 20000 mutations in one query [#32122](https://github.com/ClickHouse/ClickHouse/pull/32122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@ -411,11 +411,11 @@ sidebar_label: 2022
* Add test for [#32186](https://github.com/ClickHouse/ClickHouse/issues/32186) [#32203](https://github.com/ClickHouse/ClickHouse/pull/32203) ([Raúl Marín](https://github.com/Algunenano)).
* Fix uncaught exception in DatabaseLazy [#32206](https://github.com/ClickHouse/ClickHouse/pull/32206) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update ASTCreateQuery.cpp [#32208](https://github.com/ClickHouse/ClickHouse/pull/32208) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flacky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix jemalloc under osx [#32219](https://github.com/ClickHouse/ClickHouse/pull/32219) ([Azat Khuzhin](https://github.com/azat)).
* Add missing timezones to some tests [#32222](https://github.com/ClickHouse/ClickHouse/pull/32222) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix versioning of aggregate functions (fixes performance tests) [#32236](https://github.com/ClickHouse/ClickHouse/pull/32236) ([Azat Khuzhin](https://github.com/azat)).
* Disable window view tests temporarily because still flacky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable window view tests temporarily because still flaky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix typo in tupleToNameValuePairs doc [#32262](https://github.com/ClickHouse/ClickHouse/pull/32262) ([Vladimir C](https://github.com/vdimir)).
* Fix possible Pipeline stuck in case of StrictResize processor. [#32270](https://github.com/ClickHouse/ClickHouse/pull/32270) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible crash in DataTypeAggregateFunction [#32287](https://github.com/ClickHouse/ClickHouse/pull/32287) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

View File

@ -158,7 +158,7 @@ sidebar_label: 2022
* MemoryStorage sync comments and code [#22721](https://github.com/ClickHouse/ClickHouse/pull/22721) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix potential segfault on Keeper startup [#22743](https://github.com/ClickHouse/ClickHouse/pull/22743) ([alesapin](https://github.com/alesapin)).
* Avoid using harmful function rand() [#22744](https://github.com/ClickHouse/ClickHouse/pull/22744) ([Amos Bird](https://github.com/amosbird)).
* Fix flacky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix flaky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)).
* add more messages when flushing the logs [#22761](https://github.com/ClickHouse/ClickHouse/pull/22761) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Moved BorrowedObjectPool to common [#22764](https://github.com/ClickHouse/ClickHouse/pull/22764) ([Maksim Kita](https://github.com/kitaisreal)).
* Functions ExternalDictionaries standardize exception throw [#22821](https://github.com/ClickHouse/ClickHouse/pull/22821) ([Maksim Kita](https://github.com/kitaisreal)).

View File

@ -55,7 +55,7 @@ sidebar_label: 2022
* Try fix rabbitmq tests [#26826](https://github.com/ClickHouse/ClickHouse/pull/26826) ([Kseniia Sumarokova](https://github.com/kssenii)).
* One more library bridge fix [#26873](https://github.com/ClickHouse/ClickHouse/pull/26873) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).

View File

@ -35,7 +35,7 @@ sidebar_label: 2022
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)).
* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).

View File

@ -101,7 +101,7 @@ sidebar_label: 2022
* Separate log files for separate runs in stress test [#25741](https://github.com/ClickHouse/ClickHouse/pull/25741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix slow performance test [#25742](https://github.com/ClickHouse/ClickHouse/pull/25742) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* DatabaseAtomic EXCHANGE DICTIONARIES fix test [#25753](https://github.com/ClickHouse/ClickHouse/pull/25753) ([Maksim Kita](https://github.com/kitaisreal)).
* Try fix flacky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Try fix flaky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a test for [#13993](https://github.com/ClickHouse/ClickHouse/issues/13993) [#25758](https://github.com/ClickHouse/ClickHouse/pull/25758) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Set follow-fork-mode child for gdb in stress/fasttest/fuzzer [#25769](https://github.com/ClickHouse/ClickHouse/pull/25769) ([Azat Khuzhin](https://github.com/azat)).
* Ignore TOO_DEEP_RECURSION server exception during fuzzing [#25770](https://github.com/ClickHouse/ClickHouse/pull/25770) ([Azat Khuzhin](https://github.com/azat)).

View File

@ -40,7 +40,7 @@ sidebar_label: 2022
* Fix several bugs in ZooKeeper snapshots deserialization [#26127](https://github.com/ClickHouse/ClickHouse/pull/26127) ([alesapin](https://github.com/alesapin)).
* Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)).
* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).

View File

@ -346,7 +346,7 @@ sidebar_label: 2022
* Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix 01300_client_save_history_when_terminated_long [#27324](https://github.com/ClickHouse/ClickHouse/pull/27324) ([Raúl Marín](https://github.com/Algunenano)).
* Try update contrib/zlib-ng [#27327](https://github.com/ClickHouse/ClickHouse/pull/27327) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add and check system.mutations for database filter [#27384](https://github.com/ClickHouse/ClickHouse/pull/27384) ([Azat Khuzhin](https://github.com/azat)).
* Correct the key data type used in mapContains [#27423](https://github.com/ClickHouse/ClickHouse/pull/27423) ([Fuwang Hu](https://github.com/fuwhu)).
* Fix tests for WithMergeableStateAfterAggregationAndLimit [#27424](https://github.com/ClickHouse/ClickHouse/pull/27424) ([Azat Khuzhin](https://github.com/azat)).

View File

@ -398,7 +398,7 @@ sidebar_label: 2022
* test for [#24410](https://github.com/ClickHouse/ClickHouse/issues/24410) [#33265](https://github.com/ClickHouse/ClickHouse/pull/33265) ([Denny Crane](https://github.com/den-crane)).
* Wait for RabbitMQ container to actually start when it was restarted in test on purpose [#33266](https://github.com/ClickHouse/ClickHouse/pull/33266) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Mark max_alter_threads as obsolete [#33268](https://github.com/ClickHouse/ClickHouse/pull/33268) ([Denny Crane](https://github.com/den-crane)).
* Fix azure tests flackyness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix azure tests flakiness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Test for [#26920](https://github.com/ClickHouse/ClickHouse/issues/26920) [#33272](https://github.com/ClickHouse/ClickHouse/pull/33272) ([Denny Crane](https://github.com/den-crane)).
* Fix test_storage_kafka failures by adjusting retention.ms [#33278](https://github.com/ClickHouse/ClickHouse/pull/33278) ([Azat Khuzhin](https://github.com/azat)).
* Disable FunctionConvertFromString::canBeExecutedOnDefaultArguments [#33286](https://github.com/ClickHouse/ClickHouse/pull/33286) ([Vladimir C](https://github.com/vdimir)).
@ -447,7 +447,7 @@ sidebar_label: 2022
* Update mongodb.md [#33585](https://github.com/ClickHouse/ClickHouse/pull/33585) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Restore existing static builds links [#33597](https://github.com/ClickHouse/ClickHouse/pull/33597) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pylint for run_check.py [#33600](https://github.com/ClickHouse/ClickHouse/pull/33600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix flacky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make ZooKeeper client better interpret keeper server connection reject [#33602](https://github.com/ClickHouse/ClickHouse/pull/33602) ([alesapin](https://github.com/alesapin)).
* Fix broken workflow dependencies [#33608](https://github.com/ClickHouse/ClickHouse/pull/33608) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Force rebuild images in CI [#33609](https://github.com/ClickHouse/ClickHouse/pull/33609) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -410,7 +410,7 @@ sidebar_label: 2022
* Fix mongodb test with new cert [#36161](https://github.com/ClickHouse/ClickHouse/pull/36161) ([alesapin](https://github.com/alesapin)).
* Some fixes for ReplicatedMergeTree [#36163](https://github.com/ClickHouse/ClickHouse/pull/36163) ([Alexander Tokmakov](https://github.com/tavplubix)).
* clickhouse-client: properly cancel query in case of error during formatting data [#36164](https://github.com/ClickHouse/ClickHouse/pull/36164) ([Azat Khuzhin](https://github.com/azat)).
* Fix flacky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert "Fix possible mutation stuck due to race with DROP_RANGE" [#36190](https://github.com/ClickHouse/ClickHouse/pull/36190) ([Azat Khuzhin](https://github.com/azat)).
* Use atomic instead of mutex + condvar in ParallelReadBuffer [#36192](https://github.com/ClickHouse/ClickHouse/pull/36192) ([Kruglov Pavel](https://github.com/Avogar)).
* Follow-up to [#36138](https://github.com/ClickHouse/ClickHouse/issues/36138) [#36194](https://github.com/ClickHouse/ClickHouse/pull/36194) ([Alexander Tokmakov](https://github.com/tavplubix)).

View File

@ -321,7 +321,7 @@ sidebar_label: 2023
* Add a test for [#38128](https://github.com/ClickHouse/ClickHouse/issues/38128) [#48817](https://github.com/ClickHouse/ClickHouse/pull/48817) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove excessive logging [#48826](https://github.com/ClickHouse/ClickHouse/pull/48826) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* remove duplicate indentwith in clang-format [#48834](https://github.com/ClickHouse/ClickHouse/pull/48834) ([cluster](https://github.com/infdahai)).
* Try fix flacky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)).
* Try fix flaky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)).
* fix the race wait loading parts [#48844](https://github.com/ClickHouse/ClickHouse/pull/48844) ([Sema Checherinda](https://github.com/CheSema)).
* suppress assert of progress for test_system_replicated_fetches [#48856](https://github.com/ClickHouse/ClickHouse/pull/48856) ([Han Fei](https://github.com/hanfei1991)).
* Fix: do not run test_store_cleanup_disk_s3 in parallel [#48863](https://github.com/ClickHouse/ClickHouse/pull/48863) ([Igor Nikonov](https://github.com/devcrafter)).
@ -372,4 +372,3 @@ sidebar_label: 2023
* suppress two timeout tests [#49175](https://github.com/ClickHouse/ClickHouse/pull/49175) ([Han Fei](https://github.com/hanfei1991)).
* Document makeDateTime() and its variants [#49183](https://github.com/ClickHouse/ClickHouse/pull/49183) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix after [#49110](https://github.com/ClickHouse/ClickHouse/issues/49110) [#49206](https://github.com/ClickHouse/ClickHouse/pull/49206) ([Kseniia Sumarokova](https://github.com/kssenii)).

View File

@ -263,7 +263,7 @@ sidebar_label: 2023
* Fix broken labeling for `manual approve` [#51405](https://github.com/ClickHouse/ClickHouse/pull/51405) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix parts lifetime in `MergeTreeTransaction` [#51407](https://github.com/ClickHouse/ClickHouse/pull/51407) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test test_skip_empty_files [#51409](https://github.com/ClickHouse/ClickHouse/pull/51409) ([Kruglov Pavel](https://github.com/Avogar)).
* fix flacky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)).
* fix flaky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)).
* Update README.md [#51413](https://github.com/ClickHouse/ClickHouse/pull/51413) ([Tyler Hannan](https://github.com/tylerhannan)).
* Replace try/catch logic in hasTokenOrNull() by something more lightweight [#51425](https://github.com/ClickHouse/ClickHouse/pull/51425) ([Robert Schulze](https://github.com/rschu1ze)).
* Add retries to `tlsv1_3` tests [#51434](https://github.com/ClickHouse/ClickHouse/pull/51434) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

View File

@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.5.78-stable (0138248cb62) FIXME as compared to v24.5.4.49-stable (63b760955a0)
#### Improvement
* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).

View File

@ -0,0 +1,67 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.3.95-stable (8325c920d11) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)
#### Improvement
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): `TRUNCATE DATABASE` used to stop replication as if it were a `DROP DATABASE` query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).

View File

@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query
`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.
To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag.
To address the problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree`. It is fully ready for production use, but it can still be turned off by setting `enable_analyzer` to `false`.
## Functions {#functions}

View File

@ -146,6 +146,7 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_etag` — ETag of the file. Type: `LowCardinality(String)`. If the etag is unknown, the value is `NULL`.
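For illustration, here is a minimal sketch of querying these virtual columns; the table name `s3_table` is a hypothetical placeholder:

```sql
-- _file, _size, _time and _etag are virtual columns: they are not part of
-- the table definition but can be selected like ordinary columns.
SELECT _file, _size, _time, _etag, count() AS rows
FROM s3_table
GROUP BY _file, _size, _time, _etag;
```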
For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).

View File

@ -379,7 +379,7 @@ You can mitigate this problem by enabling `wait_end_of_query=1` ([Response Buffe
However, this does not completely solve the problem, because the result must still fit within the `http_response_buffer_size`, and other settings such as `send_progress_in_http_headers` can interfere with delaying the header.
The only way to catch all errors is to analyze the HTTP body before parsing it using the required format.
### Queries with Parameters {#cli-queries-with-parameters}
## Queries with Parameters {#cli-queries-with-parameters}
You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](../interfaces/cli.md#cli-queries-with-parameters).
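As a brief sketch of the parameter syntax (the table and parameter names here are illustrative): a `{name:type}` placeholder in the query is bound from the HTTP request parameter `param_name`.

```sql
-- With HTTP request parameters param_id=42 and param_cat=web,
-- the placeholders below are substituted before execution.
SELECT *
FROM events
WHERE id = {id:UInt32} AND category = {cat:String};
```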

View File

@ -123,7 +123,7 @@ To ensure consistent and expected results, especially when migrating old queries
In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.
- `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.
- `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.
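Before the example below, here is a minimal sketch of the first rule; the tables `t1` and `t2` are hypothetical:

```sql
-- Assume t1.id is LowCardinality(String) and t2.id is String.
-- With the new analyzer, the common supertype of the USING column is String,
-- so the LowCardinality modifier is discarded.
SELECT toTypeName(id)
FROM t1
JOIN t2 USING (id)
LIMIT 1;
```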
**Example:**
@ -144,7 +144,7 @@ During projection names computation, aliases are not substituted.
SELECT
1 + 1 AS x,
x + 1
SETTINGS allow_experimental_analyzer = 0
SETTINGS enable_analyzer = 0
FORMAT PrettyCompact
┌─x─┬─plus(plus(1, 1), 1)─┐
@ -154,7 +154,7 @@ FORMAT PrettyCompact
SELECT
1 + 1 AS x,
x + 1
SETTINGS allow_experimental_analyzer = 1
SETTINGS enable_analyzer = 1
FORMAT PrettyCompact
┌─x─┬─plus(x, 1)─┐
@ -177,7 +177,7 @@ SELECT toTypeName(if(0, [2, 3, 4], 'String'))
### Heterogeneous clusters
The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `enable_analyzer` setting values.
### Mutations are interpreted by previous analyzer

View File

@ -85,6 +85,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
- `use_same_s3_credentials_for_base_backup`: whether base backup to S3 should inherit credentials from the query. Only works with `S3`.
- `use_same_password_for_base_backup`: whether base backup archive should inherit the password from the query.
- `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables
- `storage_policy`: storage policy for the tables being restored. See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family.
- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`
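As a hedged sketch of how `use_same_password_for_base_backup` combines with `password` and `base_backup` (the archive names and password are placeholders):

```sql
-- The incremental archive '2.zip' is protected with 'secret'; because
-- use_same_password_for_base_backup = 1, the same password is also used
-- to open the base archive '1.zip'.
BACKUP TABLE db.my_table TO Disk('backups', '2.zip')
SETTINGS password = 'secret',
         base_backup = Disk('backups', '1.zip'),
         use_same_password_for_base_backup = 1;
```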

View File

@ -4051,7 +4051,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva
For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(col, cond)`. It may improve performance.
:::note
Supported only with experimental analyzer (`allow_experimental_analyzer = 1`).
Supported only with experimental analyzer (`enable_analyzer = 1`).
:::
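A small illustrative sketch of the pattern this optimization targets; the table `requests` and its columns are hypothetical:

```sql
-- With the analyzer enabled, the avg(if(...)) form below is eligible
-- to be rewritten into the equivalent -OrNullIf combinator form.
SELECT avg(if(status = 'ok', latency_ms, NULL)) AS avg_ok_latency
FROM requests
SETTINGS enable_analyzer = 1;
```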
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
@ -4629,8 +4629,8 @@ Default Value: 5.
## memory_overcommit_ratio_denominator {#memory_overcommit_ratio_denominator}
It represents soft memory limit in case when hard limit is reached on user level.
This value is used to compute overcommit ratio for the query.
It represents the soft memory limit when the hard limit is reached on the global level.
This value is used to compute the overcommit ratio for the query.
Zero means skip the query.
Read more about [memory overcommit](memory-overcommit.md).
@ -4646,8 +4646,8 @@ Default value: `5000000`.
## memory_overcommit_ratio_denominator_for_user {#memory_overcommit_ratio_denominator_for_user}
It represents soft memory limit in case when hard limit is reached on global level.
This value is used to compute overcommit ratio for the query.
It represents the soft memory limit when the hard limit is reached on the user level.
This value is used to compute the overcommit ratio for the query.
Zero means skip the query.
Read more about [memory overcommit](memory-overcommit.md).
@ -5609,6 +5609,18 @@ Minimal size of block to compress in CROSS JOIN. Zero value means - disable this
Default value: `1GiB`.
## restore_replace_external_engines_to_null
For testing purposes. Replaces all external table engines with `Null` so that no external connections are initiated.
Default value: `False`
## restore_replace_external_table_functions_to_null
For testing purposes. Replaces all external table functions with `Null` so that no external connections are initiated.
Default value: `False`
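A minimal sketch of using these two testing settings together (the backup location is a placeholder):

```sql
-- External engines and table functions become Null during the restore,
-- so no outside connections are initiated while testing.
SET restore_replace_external_engines_to_null = 1;
SET restore_replace_external_table_functions_to_null = 1;
RESTORE DATABASE db FROM Disk('backups', 'db.zip');
```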
## disable_insertion_and_mutation
Disables all inserts and mutations (`ALTER TABLE UPDATE` / `ALTER TABLE DELETE` / `ALTER TABLE DROP PARTITION`). When set to true, the node can focus on read queries.

View File

@ -28,39 +28,39 @@ A client application to interact with clickhouse-keeper by its native protocol.
Connected to ZooKeeper at [::1]:9181 with session_id 137
/ :) ls
keeper foo bar
/ :) cd keeper
/ :) cd 'keeper'
/keeper :) ls
api_version
/keeper :) cd api_version
/keeper :) cd 'api_version'
/keeper/api_version :) ls
/keeper/api_version :) cd xyz
/keeper/api_version :) cd 'xyz'
Path /keeper/api_version/xyz does not exist
/keeper/api_version :) cd ../../
/ :) ls
keeper foo bar
/ :) get keeper/api_version
/ :) get 'keeper/api_version'
2
```
## Commands {#clickhouse-keeper-client-commands}
- `ls [path]` -- Lists the nodes for the given path (default: cwd)
- `cd [path]` -- Changes the working path (default `.`)
- `exists <path>` -- Returns `1` if node exists, `0` otherwise
- `set <path> <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create <path> <value> [mode]` -- Creates new node with the set value
- `touch <path>` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get <path>` -- Returns the node's value
- `rm <path> [version]` -- Removes the node only if version matches (default: -1)
- `rmr <path>` -- Recursively deletes path. Confirmation required
- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value
- `touch '<path>'` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get '<path>'` -- Returns the node's value
- `rm '<path>' [version]` -- Removes the node only if version matches (default: -1)
- `rmr '<path>'` -- Recursively deletes path. Confirmation required
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_direct_children_number [path]` -- Get the number of direct child nodes under a specific path
- `get_all_children_number [path]` -- Get the total number of child nodes under a specific path
- `get_stat [path]` -- Returns the node's stat (default `.`)
- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
- `get_direct_children_number '[path]'` -- Get the number of direct child nodes under a specific path
- `get_all_children_number '[path]'` -- Get the total number of child nodes under a specific path
- `get_stat '[path]'` -- Returns the node's stat (default `.`)
- `find_super_nodes <threshold> '[path]'` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
- `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
- `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)
- `sync <path>` -- Synchronizes node between processes and leader
- `sync '<path>'` -- Synchronizes node between processes and leader
- `reconfig <add|remove|set> "<arg>" [version]` -- Reconfigure Keeper cluster. See https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#reconfiguration

View File

@ -10,7 +10,7 @@ Calculates a concatenated string from a group of strings, optionally separated b
**Syntax**
``` sql
groupConcat(expression [, delimiter] [, limit]);
groupConcat[(delimiter [, limit])](expression);
```
**Arguments**
@ -20,7 +20,7 @@ groupConcat(expression [, delimiter] [, limit]);
- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.
:::note
If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
If delimiter is specified without limit, it must be the first parameter. If both delimiter and limit are specified, delimiter must precede limit.
:::
**Returned value**
@ -61,7 +61,7 @@ This concatenates all names into one continuous string without any separator.
Query:
``` sql
SELECT groupConcat(Name, ', ', 2) FROM Employees;
SELECT groupConcat(', ')(Name) FROM Employees;
```
Result:
@ -78,7 +78,7 @@ This output shows the names separated by a comma followed by a space.
Query:
``` sql
SELECT groupConcat(Name, ', ', 2) FROM Employees;
SELECT groupConcat(', ', 2)(Name) FROM Employees;
```
Result:

View File

@ -223,3 +223,28 @@ SELECT translateUTF8('Münchener Straße', 'üß', 'us') AS res;
│ Munchener Strase │
└──────────────────┘
```
## printf
The `printf` function formats the given string with the values (strings, integers, floating-point numbers, etc.) listed in the arguments, similar to the `printf` function in C++. The format string can contain format specifiers starting with the `%` character. Anything that is not part of a format specifier is treated as literal text and copied verbatim into the output. A literal `%` character can be escaped as `%%`.
**Syntax**
``` sql
printf(format, arg1, arg2, ...)
```
**Example**
Query:
``` sql
select printf('%%%s %s %d', 'Hello', 'World', 2024);
```
``` response
┌─printf('%%%s %s %d', 'Hello', 'World', 2024)─┐
│ %Hello World 2024 │
└──────────────────────────────────────────────┘
```

View File

@ -43,7 +43,7 @@ Result:
## mapFromArrays
Creates a map from an array of keys and an array of values.
Creates a map from an array or map of keys and an array or map of values.
The function is a convenient alternative to syntax `CAST([...], 'Map(key_type, value_type)')`.
For example, instead of writing
@ -62,8 +62,8 @@ Alias: `MAP_FROM_ARRAYS(keys, values)`
**Arguments**
- `keys` — Array of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type.
- `values` - Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).
- `keys` — Array or map of keys to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). If `keys` is an array, we accept `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as its type as long as it doesn't contain NULL values.
- `values` — Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).
**Returned value**
@ -99,6 +99,18 @@ Result:
└───────────────────────────────────────────────────────┘
```
```sql
SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])
```
Result:
```
┌─mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])─┐
│ {('a',1):1,('b',2):2,('c',3):3} │
└───────────────────────────────────────────────────────┘
```
## extractKeyValuePairs
Converts a string of key-value pairs to a [Map(String, String)](../data-types/map.md).

View File

@ -3045,13 +3045,425 @@ toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0
- [`toUInt256OrZero`](#touint256orzero).
- [`toUInt256OrNull`](#touint256ornull).
## toFloat(32\|64)
## toFloat32
## toFloat(32\|64)OrZero
Converts an input value to a value of type [`Float32`](../data-types/float.md). Throws an exception in case of an error.
## toFloat(32\|64)OrNull
**Syntax**
## toFloat(32\|64)OrDefault
```sql
toFloat32(expr)
```
**Arguments**
- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- String representations of (U)Int8/16/32/64/128/256.
- Values of type Float32/64, including `NaN` and `Inf`.
- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive).
Unsupported arguments:
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32('0xc0fe');`.
**Returned value**
- 32-bit floating point value. [Float32](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat32(42.7),
toFloat32('42.7'),
toFloat32('NaN')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat32(42.7): 42.7
toFloat32('42.7'): 42.7
toFloat32('NaN'): nan
```
**See also**
- [`toFloat32OrZero`](#tofloat32orzero).
- [`toFloat32OrNull`](#tofloat32ornull).
- [`toFloat32OrDefault`](#tofloat32ordefault).
## toFloat32OrZero
Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns `0` in case of an error.
**Syntax**
```sql
toFloat32OrZero(x)
```
**Arguments**
- `x` — A String representation of a number. [String](../data-types/string.md).
Supported arguments:
- String representations of (U)Int8/16/32/64/128/256, Float32/64.
Unsupported arguments (return `0`):
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrZero('0xc0fe');`.
**Returned value**
- 32-bit Float value if successful, otherwise `0`. [Float32](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat32OrZero('42.7'),
toFloat32OrZero('abc')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat32OrZero('42.7'): 42.7
toFloat32OrZero('abc'): 0
```
**See also**
- [`toFloat32`](#tofloat32).
- [`toFloat32OrNull`](#tofloat32ornull).
- [`toFloat32OrDefault`](#tofloat32ordefault).
## toFloat32OrNull
Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns `NULL` in case of an error.
**Syntax**
```sql
toFloat32OrNull(x)
```
**Arguments**
- `x` — A String representation of a number. [String](../data-types/string.md).
Supported arguments:
- String representations of (U)Int8/16/32/64/128/256, Float32/64.
Unsupported arguments (return `\N`):
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrNull('0xc0fe');`.
**Returned value**
- 32-bit Float value if successful, otherwise `\N`. [Float32](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat32OrNull('42.7'),
toFloat32OrNull('abc')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat32OrNull('42.7'): 42.7
toFloat32OrNull('abc'): ᴺᵁᴸᴸ
```
**See also**
- [`toFloat32`](#tofloat32).
- [`toFloat32OrZero`](#tofloat32orzero).
- [`toFloat32OrDefault`](#tofloat32ordefault).
## toFloat32OrDefault
Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns the default value in case of an error.
If no `default` value is passed then `0` is returned in case of an error.
**Syntax**
```sql
toFloat32OrDefault(expr[, default])
```
**Arguments**
- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md).
- `default` (optional) — The default value to return if parsing to type `Float32` is unsuccessful. [Float32](../data-types/float.md).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- String representations of (U)Int8/16/32/64/128/256.
- Values of type Float32/64, including `NaN` and `Inf`.
- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive).
Arguments for which the default value is returned:
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrDefault('0xc0fe', CAST('0', 'Float32'));`.
**Returned value**
- 32-bit Float value if successful, otherwise returns the default value if passed or `0` if not. [Float32](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat32OrDefault('8', CAST('0', 'Float32')),
toFloat32OrDefault('abc', CAST('0', 'Float32'))
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat32OrDefault('8', CAST('0', 'Float32')): 8
toFloat32OrDefault('abc', CAST('0', 'Float32')): 0
```
**See also**
- [`toFloat32`](#tofloat32).
- [`toFloat32OrZero`](#tofloat32orzero).
- [`toFloat32OrNull`](#tofloat32ornull).
## toFloat64
Converts an input value to a value of type [`Float64`](../data-types/float.md). Throws an exception in case of an error.
**Syntax**
```sql
toFloat64(expr)
```
**Arguments**
- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- String representations of (U)Int8/16/32/64/128/256.
- Values of type Float32/64, including `NaN` and `Inf`.
- String representations of type Float32/64, including `NaN` and `Inf` (case-insensitive).
Unsupported arguments:
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64('0xc0fe');`.
**Returned value**
- 64-bit floating point value. [Float64](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat64(42.7),
toFloat64('42.7'),
toFloat64('NaN')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat64(42.7): 42.7
toFloat64('42.7'): 42.7
toFloat64('NaN'): nan
```
**See also**
- [`toFloat64OrZero`](#tofloat64orzero).
- [`toFloat64OrNull`](#tofloat64ornull).
- [`toFloat64OrDefault`](#tofloat64ordefault).
## toFloat64OrZero
Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns `0` in case of an error.
**Syntax**
```sql
toFloat64OrZero(x)
```
**Arguments**
- `x` — A String representation of a number. [String](../data-types/string.md).
Supported arguments:
- String representations of (U)Int8/16/32/64/128/256, Float32/64.
Unsupported arguments (return `0`):
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrZero('0xc0fe');`.
**Returned value**
- 64-bit Float value if successful, otherwise `0`. [Float64](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat64OrZero('42.7'),
toFloat64OrZero('abc')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat64OrZero('42.7'): 42.7
toFloat64OrZero('abc'): 0
```
**See also**
- [`toFloat64`](#tofloat64).
- [`toFloat64OrNull`](#tofloat64ornull).
- [`toFloat64OrDefault`](#tofloat64ordefault).
## toFloat64OrNull
Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns `NULL` in case of an error.
**Syntax**
```sql
toFloat64OrNull(x)
```
**Arguments**
- `x` — A String representation of a number. [String](../data-types/string.md).
Supported arguments:
- String representations of (U)Int8/16/32/64/128/256, Float32/64.
Unsupported arguments (return `\N`):
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrNull('0xc0fe');`.
**Returned value**
- 64-bit Float value if successful, otherwise `\N`. [Float64](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat64OrNull('42.7'),
toFloat64OrNull('abc')
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat64OrNull('42.7'): 42.7
toFloat64OrNull('abc'): ᴺᵁᴸᴸ
```
**See also**
- [`toFloat64`](#tofloat64).
- [`toFloat64OrZero`](#tofloat64orzero).
- [`toFloat64OrDefault`](#tofloat64ordefault).
## toFloat64OrDefault
Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns the default value in case of an error.
If no `default` value is passed then `0` is returned in case of an error.
**Syntax**
```sql
toFloat64OrDefault(expr[, default])
```
**Arguments**
- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md).
- `default` (optional) — The default value to return if parsing to type `Float64` is unsuccessful. [Float64](../data-types/float.md).
Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- String representations of (U)Int8/16/32/64/128/256.
- Values of type Float32/64, including `NaN` and `Inf`.
- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive).
Arguments for which the default value is returned:
- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrDefault('0xc0fe', CAST('0', 'Float64'));`.
**Returned value**
- 64-bit Float value if successful, otherwise returns the default value if passed or `0` if not. [Float64](../data-types/float.md).
**Example**
Query:
```sql
SELECT
toFloat64OrDefault('8', CAST('0', 'Float64')),
toFloat64OrDefault('abc', CAST('0', 'Float64'))
FORMAT Vertical;
```
Result:
```response
Row 1:
──────
toFloat64OrDefault('8', CAST('0', 'Float64')): 8
toFloat64OrDefault('abc', CAST('0', 'Float64')): 0
```
**See also**
- [`toFloat64`](#tofloat64).
- [`toFloat64OrZero`](#tofloat64orzero).
- [`toFloat64OrNull`](#tofloat64ornull).
## toDate

View File

@ -241,12 +241,12 @@ CREATE OR REPLACE TABLE test
(
id UInt64,
size_bytes Int64,
size String Alias formatReadableSize(size_bytes)
size String ALIAS formatReadableSize(size_bytes)
)
ENGINE = MergeTree
ORDER BY id;
INSERT INTO test Values (1, 4678899);
INSERT INTO test VALUES (1, 4678899);
SELECT id, size_bytes, size FROM test;
┌─id─┬─size_bytes─┬─size─────┐
@ -497,7 +497,7 @@ If you perform a SELECT query mentioning a specific value in an encrypted column
```sql
CREATE TABLE mytable
(
x String Codec(AES_128_GCM_SIV)
x String CODEC(AES_128_GCM_SIV)
)
ENGINE = MergeTree ORDER BY x;
```

View File

@ -36,9 +36,10 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/
## Limitations of lightweight `DELETE`
### Lightweight `DELETE`s do not work with projections
### Lightweight `DELETE`s with projections
Currently, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance.
By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance.
However, this behavior can be changed: with the setting `lightweight_mutation_projection_mode = 'drop'`, lightweight deletes will work with projections, as sketched below.
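A brief sketch of that option; the table and predicate are illustrative:

```sql
-- With 'drop', parts of an affected projection are dropped instead of
-- rebuilt, allowing the lightweight DELETE to proceed.
SET lightweight_mutation_projection_mode = 'drop';
DELETE FROM orders WHERE status = 'cancelled';
```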
## Performance considerations when using lightweight `DELETE`

View File

@ -12,6 +12,8 @@ The [rank](./rank.md) function provides the same behaviour, but with gaps in ran
**Syntax**
Alias: `denseRank` (case-sensitive)
```sql
dense_rank (column_name)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]

View File

@ -23,8 +23,8 @@ ClickHouse supports the standard grammar for defining windows and window functio
| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | ❌ (specify the number of seconds instead (`RANGE` works with any numeric type).) |
| `GROUPS` frame | ❌ |
| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) |
| `rank()`, `dense_rank()`, `row_number()` | ✅ |
| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`|
| `rank()`, `dense_rank()`, `row_number()` | ✅ <br/>Alias: `denseRank()` |
| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)` <br/>Alias: `percentRank()`|
| `lag/lead(value, offset)` | ❌ <br/> You can use one of the following workarounds:<br/> 1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` <br/> 2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
| `ntile(buckets)` | ✅ <br/> Specify the window like: `(partition by x order by y rows between unbounded preceding and unbounded following)`. |
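As a sketch of the `lag`/`lead` workarounds listed above (the table `t` with columns `ts` and `value` is hypothetical):

```sql
SELECT
    ts,
    -- workaround 1: any() over a one-row shifted frame emulates lag(value, 1)
    any(value) OVER (ORDER BY ts ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag_via_any,
    -- workaround 2: lagInFrame with an unbounded frame behaves like lag(value, 1)
    lagInFrame(value, 1) OVER (ORDER BY ts ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lag_in_frame
FROM t;
```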

View File

@ -115,7 +115,7 @@ ClickHouse is a full-fledged column-oriented DBMS. Data
`InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is written rather messily and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.
To address the current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree`. It is not yet ready for production use, but it can be tested with the `allow_experimental_analyzer` flag.
To address the problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree`. It is fully ready for production use, but it can still be turned off by setting `enable_analyzer` to `false`.
## Functions {#functions}

View File

@ -252,7 +252,7 @@ sidebar_label: "Changelog"
- Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Speed up the “exception while insert” test. This test often times out in debug builds with coverage. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Update `libcxx` and `libcxxabi` to master. In preparation for [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304). [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fix flacky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fix flaky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Clean up duplicated linker flags. Make sure the linker won't look up unexpected symbols. [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
- Add the `clickhouse-odbc` driver to the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
- Fix several bugs in unit tests. [#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))

View File

@ -421,7 +421,7 @@ try
std::lock_guard lock(servers_lock);
metrics.reserve(servers->size());
for (const auto & server : *servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
return metrics;
}
);

View File

@ -66,14 +66,14 @@
<server>
<!-- Used for secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-keeper/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-keeper/server.key</privateKeyFile>
<!-- <certificateFile>/etc/clickhouse-keeper/server.crt</certificateFile> -->
<!-- <privateKeyFile>/etc/clickhouse-keeper/server.key</privateKeyFile> -->
<!-- dhparams are optional. You can delete the <dhParamsFile> element.
To generate dhparams, use the following command:
openssl dhparam -out /etc/clickhouse-keeper/dhparam.pem 4096
Only file format with BEGIN DH PARAMETERS is supported.
-->
<dhParamsFile>/etc/clickhouse-keeper/dhparam.pem</dhParamsFile>
<!-- <dhParamsFile>/etc/clickhouse-keeper/dhparam.pem</dhParamsFile> -->
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>

View File

@ -1,6 +1,7 @@
#include "LocalServer.h"
#include <sys/resource.h>
#include <Common/Config/getLocalConfigPath.h>
#include <Common/logger_useful.h>
#include <Common/formatReadable.h>
#include <Core/UUID.h>
@ -13,6 +14,7 @@
#include <Databases/registerDatabases.h>
#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
@ -49,7 +51,6 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <base/argsToConfig.h>
#include <filesystem>
@ -127,10 +128,21 @@ void LocalServer::initialize(Poco::Util::Application & self)
{
Poco::Util::Application::initialize(self);
const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe)
if (home_path_cstr)
home_path = home_path_cstr;
/// Load config files if exists
if (getClientConfiguration().has("config-file") || fs::exists("config.xml"))
std::string config_path;
if (getClientConfiguration().has("config-file"))
config_path = getClientConfiguration().getString("config-file");
else if (config_path.empty() && fs::exists("config.xml"))
config_path = "config.xml";
else if (config_path.empty())
config_path = getLocalConfigPath(home_path).value_or("");
if (fs::exists(config_path))
{
const auto config_path = getClientConfiguration().getString("config-file", "config.xml");
ConfigProcessor config_processor(config_path, false, true);
ConfigProcessor::setConfigPath(fs::path(config_path).parent_path());
auto loaded_config = config_processor.loadConfig();
@ -204,12 +216,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
return system_database;
}
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
{
auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
return databaseCombiner;
auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
return overlay;
}
/// If path is specified and not empty, will try to setup server environment and load existing metadata
@ -355,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery()
else
table_structure = "(" + table_structure + ")";
return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});",
return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});",
table_name, table_structure, data_format, table_file);
}
@ -749,7 +761,12 @@ void LocalServer::processConfig()
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
std::string default_database = server_settings.default_database;
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
{
DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().addUUIDMapping(uuid);
DatabaseCatalog::instance().attachDatabase(default_database, database);
}
global_context->setCurrentDatabase(default_database);
if (getClientConfiguration().has("path"))

View File

@ -849,7 +849,7 @@ try
#endif
#if defined(SANITIZER)
LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
LOG_INFO(log, "Query Profiler is disabled because it cannot work under sanitizers"
" when two different stack unwinding methods will interfere with each other.");
#endif
@ -918,10 +918,10 @@ try
metrics.reserve(servers_to_start_before_tables.size() + servers.size());
for (const auto & server : servers_to_start_before_tables)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
for (const auto & server : servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
return metrics;
}
);

View File

@ -1130,8 +1130,7 @@
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_views_log>
<!-- Uncomment if use part log.
Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).-->
<!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
<part_log>
<database>system</database>
<table>part_log</table>
@ -1143,9 +1142,9 @@
<flush_on_crash>false</flush_on_crash>
</part_log>
<!-- Uncomment to write text log into table.
Text log contains all information from usual server log but stores it in structured and efficient way.
<!-- Text log contains all information from usual server log but stores it in structured and efficient way.
The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
-->
<text_log>
<database>system</database>
<table>text_log</table>
@ -1154,9 +1153,8 @@
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<level></level>
<level>trace</level>
</text_log>
-->
<!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
<metric_log>

View File

@ -17,7 +17,7 @@
--input-shadow-color: rgba(0, 255, 0, 1);
--error-color: red;
--global-error-color: white;
--legend-background: rgba(255, 255, 255, 0.75);
--legend-background: rgba(255, 255, 0, 0.75);
--title-color: #666;
--text-color: black;
--edit-title-background: #FEE;
@ -41,7 +41,7 @@
--moving-shadow-color: rgba(255, 255, 255, 0.25);
--input-shadow-color: rgba(255, 128, 0, 0.25);
--error-color: #F66;
--legend-background: rgba(255, 255, 255, 0.25);
--legend-background: rgba(0, 96, 128, 0.75);
--title-color: white;
--text-color: white;
--edit-title-background: #364f69;
@ -218,6 +218,7 @@
#chart-params .param {
width: 6%;
font-family: monospace;
}
input {
@ -256,6 +257,7 @@
font-weight: bold;
user-select: none;
cursor: pointer;
margin-bottom: 1rem;
}
#run:hover {
@ -309,7 +311,7 @@
color: var(--param-text-color);
display: inline-block;
box-shadow: 1px 1px 0 var(--shadow-color);
margin-bottom: 1rem;
margin-bottom: 0.5rem;
}
input:focus {
@ -657,6 +659,10 @@ function insertParam(name, value) {
param_value.value = value;
param_value.spellcheck = false;
let setWidth = e => { e.style.width = (e.value.length + 1) + 'ch' };
if (value) { setWidth(param_value); }
param_value.addEventListener('input', e => setWidth(e.target));
param_wrapper.appendChild(param_name);
param_wrapper.appendChild(param_value);
document.getElementById('chart-params').appendChild(param_wrapper);
@ -945,6 +951,7 @@ function showMassEditor() {
let editor = document.getElementById('mass-editor-textarea');
editor.value = JSON.stringify({params: params, queries: queries}, null, 2);
editor.focus();
mass_editor_active = true;
}
@ -1004,14 +1011,14 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
className && legendEl.classList.add(className);
uPlot.assign(legendEl.style, {
textAlign: "left",
textAlign: "right",
pointerEvents: "none",
display: "none",
position: "absolute",
left: 0,
top: 0,
zIndex: 100,
boxShadow: "2px 2px 10px rgba(0,0,0,0.1)",
boxShadow: "2px 2px 10px rgba(0, 0, 0, 0.1)",
...style
});
@ -1051,8 +1058,10 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
function update(u) {
let { left, top } = u.cursor;
left -= legendEl.clientWidth / 2;
top -= legendEl.clientHeight / 2;
/// This will place the balloon to the right of the cursor when the cursor is on the left side, and vice versa,
/// avoiding the borders of the chart.
left -= legendEl.clientWidth * (left / u.width);
top -= legendEl.clientHeight;
legendEl.style.transform = "translate(" + left + "px, " + top + "px)";
if (multiline) {
@ -1139,7 +1148,7 @@ async function draw(idx, chart, url_params, query) {
let {reply, error} = await doFetch(query, url_params);
if (!error) {
if (reply.rows.length == 0) {
if (reply.rows == 0) {
error = "Query returned empty result.";
} else if (reply.meta.length < 2) {
error = "Query should return at least two columns: unix timestamp and value.";
@ -1229,14 +1238,53 @@ async function draw(idx, chart, url_params, query) {
let sync = uPlot.sync("sync");
let axis = {
function formatDateTime(t) {
return (new Date(t * 1000)).toISOString().replace('T', '\n').replace('.000Z', '');
}
function formatDateTimes(self, ticks) {
return ticks.map((t, idx) => {
let res = formatDateTime(t);
if (idx == 0 || res.substring(0, 10) != formatDateTime(ticks[idx - 1]).substring(0, 10)) {
return res;
} else {
return res.substring(11);
}
});
}
function formatValue(v) {
const a = Math.abs(v);
if (a >= 1000000000000000) { return (v / 1000000000000000) + 'P'; }
if (a >= 1000000000000) { return (v / 1000000000000) + 'T'; }
if (a >= 1000000000) { return (v / 1000000000) + 'G'; }
if (a >= 1000000) { return (v / 1000000) + 'M'; }
if (a >= 1000) { return (v / 1000) + 'K'; }
if (a > 0 && a < 0.001) { return (v * 1000000) + "μ"; }
return v;
}
let axis_x = {
stroke: axes_color,
grid: { width: 1 / devicePixelRatio, stroke: grid_color },
ticks: { width: 1 / devicePixelRatio, stroke: grid_color }
ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
values: formatDateTimes,
space: 80,
incrs: [1, 5, 10, 15, 30,
60, 60 * 5, 60 * 10, 60 * 15, 60 * 30,
3600, 3600 * 2, 3600 * 3, 3600 * 4, 3600 * 6, 3600 * 12,
3600 * 24],
};
let axes = [axis, axis];
let series = [{ label: "x" }];
let axis_y = {
stroke: axes_color,
grid: { width: 1 / devicePixelRatio, stroke: grid_color },
ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
values: (self, ticks) => ticks.map(formatValue)
};
let axes = [axis_x, axis_y];
let series = [{ label: "time", value: (self, t) => formatDateTime(t) }];
let data = [reply.data[reply.meta[0].name]];
// Treat every column as series
@ -1254,9 +1302,10 @@ async function draw(idx, chart, url_params, query) {
const opts = {
width: chart.clientWidth,
height: chart.clientHeight,
scales: { x: { time: false } }, /// Because we want to split and format time on our own.
axes,
series,
padding: [ null, null, null, (Math.round(max_value * 100) / 100).toString().length * 6 - 10 ],
padding: [ null, null, null, 3 ],
plugins: [ legendAsTooltipPlugin() ],
cursor: {
sync: {

View File

@ -522,6 +522,9 @@
const current_url = new URL(window.location);
const opened_locally = location.protocol == 'file:';
/// Run query instantly after page is loaded if the run parameter is present.
const run_immediately = current_url.searchParams.has("run");
const server_address = current_url.searchParams.get('url');
if (server_address) {
document.getElementById('url').value = server_address;
@ -599,6 +602,9 @@
const title = "ClickHouse Query: " + query;
let history_url = window.location.pathname + '?user=' + encodeURIComponent(user);
if (run_immediately) {
history_url += "&run=1";
}
if (server_address != location.origin) {
/// Save server's address in URL if it's not identical to the address of the play UI.
history_url += '&url=' + encodeURIComponent(server_address);
@ -1160,6 +1166,10 @@
});
}
if (run_immediately) {
post();
}
document.getElementById('toggle-light').onclick = function() {
setColorTheme('light', true);
}

View File

@ -67,6 +67,9 @@ struct UniqVariadicHash<false, true>
{
static UInt64 apply(size_t num_args, const IColumn ** columns, size_t row_num)
{
if (!num_args)
return 0;
UInt64 hash;
const auto & tuple_columns = assert_cast<const ColumnTuple *>(columns[0])->getColumns();

View File

@ -5,6 +5,7 @@
#include <Common/assert_cast.h>
#include <Common/FieldVisitorToString.h>
#include <Common/SipHash.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
@ -162,6 +163,7 @@ QueryTreeNodePtr ConstantNode::cloneImpl() const
ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
{
const auto & constant_value_literal = constant_value->getValue();
const auto & constant_value_type = constant_value->getType();
auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
if (!options.add_cast_for_constants)
@ -169,7 +171,25 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
if (requiresCastCall())
{
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value->getType()->getName());
/** Value for DateTime64 is Decimal64, which is serialized as a string literal.
* If we serialize it as is, DateTime64 would be parsed from that string literal, which can be incorrect.
* For example, DateTime64 cannot be parsed from a short value like '1', even though it is a valid Decimal64 value.
* It could also lead to ambiguous parsing because we don't know if the string literal represents a date or a Decimal64 literal.
* For this reason, we use a string literal representing a date instead of a Decimal64 literal.
*/
if (WhichDataType(constant_value_type->getTypeId()).isDateTime64())
{
const auto * date_time_type = typeid_cast<const DataTypeDateTime64 *>(constant_value_type.get());
DecimalField<Decimal64> decimal_value;
if (constant_value_literal.tryGet<DecimalField<Decimal64>>(decimal_value))
{
WriteBufferFromOwnString ostr;
writeDateTimeText(decimal_value.getValue(), date_time_type->getScale(), ostr, date_time_type->getTimeZone());
constant_value_ast = std::make_shared<ASTLiteral>(ostr.str());
}
}
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value_type->getName());
return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast));
}

View File

@ -11,6 +11,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/Utils.h>
namespace DB
@ -25,8 +26,15 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<ComparisonTupleEliminationPassVisitor>;
using Base::Base;
static bool needChildVisit(QueryTreeNodePtr &, QueryTreeNodePtr & child)
static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child)
{
if (parent->getNodeType() == QueryTreeNodeType::JOIN)
{
/// In JOIN ON section comparison of tuples works a bit differently.
/// For example we can join on tuple(NULL) = tuple(NULL), join algorithms consider only NULLs on the top level.
if (parent->as<const JoinNode &>().getJoinExpression().get() == child.get())
return false;
}
return child->getNodeType() != QueryTreeNodeType::TABLE_FUNCTION;
}

View File

@ -46,7 +46,7 @@ public:
return;
const auto & storage_snapshot = table_node ? table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot();
if (!storage->isVirtualColumn(column.name, storage_snapshot->getMetadataForQuery()))
if (!storage->isVirtualColumn(column.name, storage_snapshot->metadata))
return;
auto function_node = std::make_shared<FunctionNode>("shardNum");

View File

@ -4541,7 +4541,15 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
resolveExpressionNode(nodes[1], scope, /* allow_lambda_expression */false, /* allow_table_function */false);
if (auto * constant = nodes[1]->as<ConstantNode>())
{
view_params[identifier_node->getIdentifier().getFullName()] = convertFieldToString(constant->getValue());
/// Serialize the constant value using datatype specific
/// interfaces to match the deserialization in ReplaceQueryParametersVistor.
WriteBufferFromOwnString buf;
const auto & value = constant->getValue();
auto real_type = constant->getResultType();
auto temporary_column = real_type->createColumn();
temporary_column->insert(value);
real_type->getDefaultSerialization()->serializeTextEscaped(*temporary_column, 0, buf, {});
view_params[identifier_node->getIdentifier().getFullName()] = buf.str();
}
}
}

View File

@ -43,6 +43,12 @@ size_t getCompoundTypeDepth(const IDataType & type)
const auto & tuple_elements = assert_cast<const DataTypeTuple &>(*current_type).getElements();
if (!tuple_elements.empty())
current_type = tuple_elements.at(0).get();
else
{
/// Special case: a tuple with no elements - tuple(). In this case, what's the compound type depth?
/// I'm not certain about the theoretical answer, but from experiment, 1 is the most reasonable choice.
return 1;
}
++result;
}

View File

@ -41,6 +41,7 @@ public:
bool allow_s3_native_copy = true;
bool allow_azure_native_copy = true;
bool use_same_s3_credentials_for_base_backup = false;
bool use_same_password_for_base_backup = false;
bool azure_attempt_to_create_container = true;
ReadSettings read_settings;
WriteSettings write_settings;

View File

@ -92,7 +92,8 @@ BackupImpl::BackupImpl(
std::shared_ptr<IBackupReader> reader_,
const ContextPtr & context_,
bool is_internal_backup_,
bool use_same_s3_credentials_for_base_backup_)
bool use_same_s3_credentials_for_base_backup_,
bool use_same_password_for_base_backup_)
: backup_info(backup_info_)
, backup_name_for_logging(backup_info.toStringForLogging())
, use_archive(!archive_params_.archive_name.empty())
@ -104,6 +105,7 @@ BackupImpl::BackupImpl(
, version(INITIAL_BACKUP_VERSION)
, base_backup_info(base_backup_info_)
, use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
, use_same_password_for_base_backup(use_same_password_for_base_backup_)
, log(getLogger("BackupImpl"))
{
open();
@ -120,7 +122,8 @@ BackupImpl::BackupImpl(
const std::shared_ptr<IBackupCoordination> & coordination_,
const std::optional<UUID> & backup_uuid_,
bool deduplicate_files_,
bool use_same_s3_credentials_for_base_backup_)
bool use_same_s3_credentials_for_base_backup_,
bool use_same_password_for_base_backup_)
: backup_info(backup_info_)
, backup_name_for_logging(backup_info.toStringForLogging())
, use_archive(!archive_params_.archive_name.empty())
@ -135,6 +138,7 @@ BackupImpl::BackupImpl(
, base_backup_info(base_backup_info_)
, deduplicate_files(deduplicate_files_)
, use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
, use_same_password_for_base_backup(use_same_password_for_base_backup_)
, log(getLogger("BackupImpl"))
{
open();
@ -258,6 +262,11 @@ std::shared_ptr<const IBackup> BackupImpl::getBaseBackupUnlocked() const
params.is_internal_backup = is_internal_backup;
/// use_same_s3_credentials_for_base_backup should be inherited for base backups
params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup;
/// use_same_password_for_base_backup should be inherited for base backups
params.use_same_password_for_base_backup = use_same_password_for_base_backup;
if (params.use_same_password_for_base_backup)
params.password = archive_params.password;
base_backup = BackupFactory::instance().createBackup(params);
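The propagation rule for the new flag, extracted into a standalone sketch (the Params struct is hypothetical; the real fields live on the factory's create-params object): both "use same" flags are inherited by the base backup, and the password is copied only when explicitly requested.

#include <string>

struct Params
{
    std::string password;
    bool use_same_s3_credentials_for_base_backup = false;
    bool use_same_password_for_base_backup = false;
};

// Sketch of getBaseBackupUnlocked's parameter propagation.
Params makeBaseBackupParams(const Params & current, const std::string & archive_password)
{
    Params base;
    base.use_same_s3_credentials_for_base_backup = current.use_same_s3_credentials_for_base_backup;
    base.use_same_password_for_base_backup = current.use_same_password_for_base_backup;
    if (base.use_same_password_for_base_backup)
        base.password = archive_password;
    return base;
}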

View File

@ -41,7 +41,8 @@ public:
std::shared_ptr<IBackupReader> reader_,
const ContextPtr & context_,
bool is_internal_backup_,
bool use_same_s3_credentials_for_base_backup_);
bool use_same_s3_credentials_for_base_backup_,
bool use_same_password_for_base_backup_);
BackupImpl(
const BackupInfo & backup_info_,
@ -53,7 +54,8 @@ public:
const std::shared_ptr<IBackupCoordination> & coordination_,
const std::optional<UUID> & backup_uuid_,
bool deduplicate_files_,
bool use_same_s3_credentials_for_base_backup_);
bool use_same_s3_credentials_for_base_backup_,
bool use_same_password_for_base_backup_);
~BackupImpl() override;
@ -153,6 +155,7 @@ private:
bool writing_finalized = false;
bool deduplicate_files = true;
bool use_same_s3_credentials_for_base_backup = false;
bool use_same_password_for_base_backup = false;
const LoggerPtr log;
};

View File

@ -29,6 +29,7 @@ namespace ErrorCodes
M(Bool, allow_s3_native_copy) \
M(Bool, allow_azure_native_copy) \
M(Bool, use_same_s3_credentials_for_base_backup) \
M(Bool, use_same_password_for_base_backup) \
M(Bool, azure_attempt_to_create_container) \
M(Bool, read_from_filesystem_cache) \
M(UInt64, shard_num) \

View File

@ -50,6 +50,9 @@ struct BackupSettings
/// Whether the base backup to S3 should inherit credentials from the BACKUP query.
bool use_same_s3_credentials_for_base_backup = false;
/// Whether the base backup archive should be unlocked using the same password as the incremental archive
bool use_same_password_for_base_backup = false;
/// Whether a new Azure container should be created if it does not exist (requires permissions at the storage account level)
bool azure_attempt_to_create_container = true;

View File

@ -602,6 +602,7 @@ void BackupsWorker::doBackup(
backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
backup_create_params.allow_azure_native_copy = backup_settings.allow_azure_native_copy;
backup_create_params.use_same_s3_credentials_for_base_backup = backup_settings.use_same_s3_credentials_for_base_backup;
backup_create_params.use_same_password_for_base_backup = backup_settings.use_same_password_for_base_backup;
backup_create_params.azure_attempt_to_create_container = backup_settings.azure_attempt_to_create_container;
backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings);
backup_create_params.write_settings = getWriteSettingsForBackup(context);
@ -924,6 +925,7 @@ void BackupsWorker::doRestore(
backup_open_params.password = restore_settings.password;
backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy;
backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
backup_open_params.use_same_password_for_base_backup = restore_settings.use_same_password_for_base_backup;
backup_open_params.read_settings = getReadSettingsForRestore(context);
backup_open_params.write_settings = getWriteSettingsForRestore(context);
backup_open_params.is_internal_backup = restore_settings.internal;

View File

@ -323,7 +323,7 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic<size_t>
return false;
bool result = false;
std::string path = zookeeper_path +"/stage";
std::string path = zookeeper_path + "/stage";
auto holder = with_retries.createRetriesControlHolder("createRootNodes");
holder.retries_ctl.retryLoop(

View File

@ -61,8 +61,6 @@ private:
void createRootNodes();
void removeAllNodes();
class ReplicatedDatabasesMetadataSync;
/// get_zookeeper will provide a zookeeper client without any fault injection
const zkutil::GetZooKeeper get_zookeeper;
const String root_zookeeper_path;

View File

@ -164,6 +164,7 @@ namespace
M(RestoreUDFCreationMode, create_function) \
M(Bool, allow_s3_native_copy) \
M(Bool, use_same_s3_credentials_for_base_backup) \
M(Bool, use_same_password_for_base_backup) \
M(Bool, restore_broken_parts_as_detached) \
M(Bool, internal) \
M(String, host_id) \

View File

@ -113,6 +113,9 @@ struct RestoreSettings
/// Whether the base backup from S3 should inherit credentials from the RESTORE query.
bool use_same_s3_credentials_for_base_backup = false;
/// Whether the base backup archive should be unlocked using the same password as the incremental archive
bool use_same_password_for_base_backup = false;
/// If true, RESTORE won't stop on broken parts while restoring; instead, they will be restored as detached parts
/// to the `detached` folder with names starting with `broken-from-backup`.
bool restore_broken_parts_as_detached = false;

View File

@ -222,10 +222,19 @@ void RestorerFromBackup::setStage(const String & new_stage, const String & messa
if (restore_coordination)
{
restore_coordination->setStage(new_stage, message);
if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
else
restore_coordination->waitForStage(new_stage);
/// The initiator of a RESTORE ON CLUSTER query waits for other hosts to complete their work (see waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore),
/// but other hosts shouldn't wait for each other's completion. (That's simply unnecessary, and the initiator
/// may start cleaning up (e.g. removing restore-coordination ZooKeeper nodes) once all other hosts are in Stage::COMPLETED.)
bool need_wait = (new_stage != Stage::COMPLETED);
if (need_wait)
{
if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
else
restore_coordination->waitForStage(new_stage);
}
}
}
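The wait policy introduced above, restated as a small standalone decision function (stage names are plain strings here for illustration; the real ones are constants, and the timeout type is simplified):

#include <chrono>
#include <optional>
#include <string>

struct WaitPolicy
{
    bool wait = false;
    std::optional<std::chrono::milliseconds> timeout;  // only the first sync is bounded
};

// Non-initiator hosts never wait on COMPLETED (the initiator may already be
// cleaning up coordination nodes); the first cross-host sync waits with a
// timeout; every other stage waits unboundedly.
WaitPolicy stageWaitPolicy(const std::string & stage, std::chrono::milliseconds first_sync_timeout)
{
    if (stage == "COMPLETED")
        return {};
    if (stage == "FINDING_TABLES_IN_BACKUP")
        return {true, first_sync_timeout};
    return {true, std::nullopt};
}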

View File

@ -141,7 +141,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
reader,
params.context,
params.is_internal_backup,
/* use_same_s3_credentials_for_base_backup*/ false);
/* use_same_s3_credentials_for_base_backup*/ false,
params.use_same_password_for_base_backup);
}
else
{
@ -164,7 +165,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
params.backup_coordination,
params.backup_uuid,
params.deduplicate_files,
/* use_same_s3_credentials_for_base_backup */ false);
/* use_same_s3_credentials_for_base_backup */ false,
params.use_same_password_for_base_backup);
}
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "AzureBlobStorage support is disabled");

View File

@ -120,7 +120,8 @@ void registerBackupEngineS3(BackupFactory & factory)
reader,
params.context,
params.is_internal_backup,
params.use_same_s3_credentials_for_base_backup);
params.use_same_s3_credentials_for_base_backup,
params.use_same_password_for_base_backup);
}
else
{
@ -144,7 +145,8 @@ void registerBackupEngineS3(BackupFactory & factory)
params.backup_coordination,
params.backup_uuid,
params.deduplicate_files,
params.use_same_s3_credentials_for_base_backup);
params.use_same_s3_credentials_for_base_backup,
params.use_same_password_for_base_backup);
}
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "S3 support is disabled");

View File

@ -178,7 +178,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
reader,
params.context,
params.is_internal_backup,
params.use_same_s3_credentials_for_base_backup);
params.use_same_s3_credentials_for_base_backup,
params.use_same_password_for_base_backup);
}
else
{
@ -197,7 +198,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
params.backup_coordination,
params.backup_uuid,
params.deduplicate_files,
params.use_same_s3_credentials_for_base_backup);
params.use_same_s3_credentials_for_base_backup,
params.use_same_password_for_base_backup);
}
};

Some files were not shown because too many files have changed in this diff.