Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)

Commit ff71973536: Merge remote-tracking branch 'origin/master' into analyzer-fix-const-fold

CHANGELOG.md: 2308 lines changed (diff suppressed because it is too large).
@@ -33,7 +33,8 @@ namespace Poco


 class Exception;
-
+class Logger;
+using LoggerPtr = std::shared_ptr<Logger>;

 class Foundation_API Logger : public Channel
     /// Logger is a special Channel that acts as the main

@@ -870,6 +871,11 @@ public:
     /// If the Logger does not yet exist, it is created, based
     /// on its parent logger.

+    static LoggerPtr getShared(const std::string & name);
+    /// Returns a shared pointer to the Logger with the given name.
+    /// If the Logger does not yet exist, it is created, based
+    /// on its parent logger.
+
     static Logger & unsafeGet(const std::string & name);
     /// Returns a reference to the Logger with the given name.
     /// If the Logger does not yet exist, it is created, based

@@ -885,6 +891,11 @@ public:
     /// given name. The Logger's Channel and log level as set as
     /// specified.

+    static LoggerPtr createShared(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
+    /// Creates and returns a shared pointer to a Logger with the
+    /// given name. The Logger's Channel and log level as set as
+    /// specified.
+
     static Logger & root();
     /// Returns a reference to the root logger, which is the ultimate
     /// ancestor of all Loggers.

@@ -893,13 +904,6 @@ public:
     /// Returns a pointer to the Logger with the given name if it
     /// exists, or a null pointer otherwise.

-    static void destroy(const std::string & name);
-    /// Destroys the logger with the specified name. Does nothing
-    /// if the logger is not found.
-    ///
-    /// After a logger has been destroyed, all references to it
-    /// become invalid.
-
     static void shutdown();
     /// Shuts down the logging framework and releases all
     /// Loggers.

@@ -929,8 +933,6 @@ public:
     static const std::string ROOT; /// The name of the root logger ("").

 protected:
-    typedef std::map<std::string, Logger *> LoggerMap;
-
     Logger(const std::string & name, Channel * pChannel, int level);
     ~Logger();

@@ -938,6 +940,7 @@ protected:
     void log(const std::string & text, Message::Priority prio, const char * file, int line);

     static std::string format(const std::string & fmt, int argc, std::string argv[]);
+    static Logger & unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
     static Logger & parent(const std::string & name);
     static void add(Logger * pLogger);
     static Logger * find(const std::string & name);

@@ -950,9 +953,6 @@ private:
     std::string _name;
     Channel * _pChannel;
     std::atomic_int _level;

-    static LoggerMap * _pLoggerMap;
-    static Mutex _mapMtx;
 };
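The net effect of these header changes: loggers can now be handed out either as registry-owned references or as reference-counted shared pointers. A minimal usage sketch (written for this note, not part of the commit; it assumes only the declarations above):

#include <Poco/Logger.h>

// Sketch: get() hands out a reference owned by the global registry;
// getShared() hands out a Poco::LoggerPtr (std::shared_ptr<Logger>) that
// keeps the logger alive via reference counting even if the registry
// entry is later dropped.
void logging_example()
{
    Poco::Logger & byRef = Poco::Logger::get("app.module");
    Poco::LoggerPtr byPtr = Poco::Logger::getShared("app.module");

    byRef.information("owned by the registry");
    byPtr->information("kept alive by this shared_ptr");
}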
@@ -38,15 +38,15 @@ public:
     /// Creates the RefCountedObject.
     /// The initial reference count is one.

-    void duplicate() const;
-    /// Increments the object's reference count.
+    size_t duplicate() const;
+    /// Increments the object's reference count, returns reference count before call.

-    void release() const throw();
+    size_t release() const throw();
     /// Decrements the object's reference count
     /// and deletes the object if the count
-    /// reaches zero.
+    /// reaches zero, returns reference count before call.

-    int referenceCount() const;
+    size_t referenceCount() const;
     /// Returns the reference count.

 protected:

@@ -57,36 +57,40 @@ private:
     RefCountedObject(const RefCountedObject &);
     RefCountedObject & operator=(const RefCountedObject &);

-    mutable AtomicCounter _counter;
+    mutable std::atomic<size_t> _counter;
 };


 //
 // inlines
 //
-inline int RefCountedObject::referenceCount() const
+inline size_t RefCountedObject::referenceCount() const
 {
-    return _counter.value();
+    return _counter.load(std::memory_order_acquire);
 }


-inline void RefCountedObject::duplicate() const
+inline size_t RefCountedObject::duplicate() const
 {
-    ++_counter;
+    return _counter.fetch_add(1, std::memory_order_acq_rel);
 }


-inline void RefCountedObject::release() const throw()
+inline size_t RefCountedObject::release() const throw()
 {
+    size_t reference_count_before = _counter.fetch_sub(1, std::memory_order_acq_rel);
+
     try
     {
-        if (--_counter == 0)
+        if (reference_count_before == 1)
             delete this;
     }
    catch (...)
    {
        poco_unexpected();
    }
+
+    return reference_count_before;
 }
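The behavioral crux of this hunk is that duplicate() and release() now report the counter value from before the update. A self-contained sketch (an illustrative stand-in, not the Poco class) of why the pre-decrement value is the useful signal:

#include <atomic>
#include <cstddef>

// Illustrative stand-in for the modified RefCountedObject, not Poco code.
class Refcounted
{
public:
    size_t duplicate() const { return counter.fetch_add(1, std::memory_order_acq_rel); }

    size_t release() const
    {
        // fetch_sub returns the value *before* the decrement, so "before == 1"
        // means this call just dropped the last reference - exactly the signal
        // a caller needs in order to also erase the object from an external
        // registry (as the LoggerDeleter below does).
        size_t before = counter.fetch_sub(1, std::memory_order_acq_rel);
        if (before == 1)
            delete this;
        return before;
    }

private:
    mutable std::atomic<size_t> counter{1};
};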
@@ -20,12 +20,38 @@
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"

+#include <cassert>
+#include <mutex>
+
+namespace
+{
+
+std::mutex & getLoggerMutex()
+{
+    auto get_logger_mutex_placeholder_memory = []()
+    {
+        static char buffer[sizeof(std::mutex)]{};
+        return buffer;
+    };
+
+    static std::mutex * logger_mutex = new (get_logger_mutex_placeholder_memory()) std::mutex();
+    return *logger_mutex;
+}
+
+struct LoggerEntry
+{
+    Poco::Logger * logger;
+    bool owned_by_shared_ptr = false;
+};
+
+using LoggerMap = std::unordered_map<std::string, LoggerEntry>;
+LoggerMap * _pLoggerMap = nullptr;
+
+}

 namespace Poco {


-Logger::LoggerMap* Logger::_pLoggerMap = 0;
-Mutex Logger::_mapMtx;
 const std::string Logger::ROOT;
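getLoggerMutex() above is the "immortal singleton" idiom: the mutex is placement-new'ed into a function-local static buffer and intentionally never destroyed, so loggers released during static destruction can still lock it. A condensed sketch of the idiom (alignas is added here for strictness; the commit's buffer relies on default alignment):

#include <mutex>
#include <new>

// Construct once into static storage, never run the destructor. Safe to use
// from other objects' destructors during static deinitialization, because
// the mutex object outlives every ordinary static.
std::mutex & getImmortalMutex()
{
    alignas(std::mutex) static char storage[sizeof(std::mutex)];
    static std::mutex * mutex = new (storage) std::mutex();  // runs exactly once
    return *mutex;
}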
@@ -73,7 +99,7 @@ void Logger::setProperty(const std::string& name, const std::string& value)
         setChannel(LoggingRegistry::defaultRegistry().channelForName(value));
     else if (name == "level")
         setLevel(value);
     else
         Channel::setProperty(name, value);
 }

@@ -112,17 +138,17 @@ void Logger::dump(const std::string& msg, const void* buffer, std::size_t length)

 void Logger::setLevel(const std::string& name, int level)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = name.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setLevel(level);
+                it.second.logger->setLevel(level);
             }
         }
     }

@@ -131,17 +157,17 @@ void Logger::setLevel(const std::string& name, int level)

 void Logger::setChannel(const std::string& name, Channel* pChannel)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = name.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setChannel(pChannel);
+                it.second.logger->setChannel(pChannel);
             }
         }
     }

@@ -150,17 +176,17 @@ void Logger::setChannel(const std::string& name, Channel* pChannel)

 void Logger::setProperty(const std::string& loggerName, const std::string& propertyName, const std::string& value)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = loggerName.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, loggerName) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, loggerName) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setProperty(propertyName, value);
+                it.second.logger->setProperty(propertyName, value);
             }
         }
     }

@@ -280,11 +306,88 @@ void Logger::formatDump(std::string& message, const void* buffer, std::size_t length)
 }


+namespace
+{
+
+struct LoggerDeleter
+{
+    void operator()(Poco::Logger * logger)
+    {
+        std::lock_guard<std::mutex> lock(getLoggerMutex());
+
+        /// If logger infrastructure is destroyed just decrement logger reference count
+        if (!_pLoggerMap)
+        {
+            logger->release();
+            return;
+        }
+
+        auto it = _pLoggerMap->find(logger->name());
+        assert(it != _pLoggerMap->end());
+
+        /** If reference count is 1, this means this shared pointer owns logger
+          * and need destroy it.
+          */
+        size_t reference_count_before_release = logger->release();
+        if (reference_count_before_release == 1)
+        {
+            assert(it->second.owned_by_shared_ptr);
+            _pLoggerMap->erase(it);
+        }
+    }
+};
+
+
+inline LoggerPtr makeLoggerPtr(Logger & logger)
+{
+    return std::shared_ptr<Logger>(&logger, LoggerDeleter());
+}
+
+}
+
+
 Logger& Logger::get(const std::string& name)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

-    return unsafeGet(name);
+    Logger & logger = unsafeGet(name);
+
+    /** If there are already shared pointer created for this logger
+      * we need to increment Logger reference count and now logger
+      * is owned by logger infrastructure.
+      */
+    auto it = _pLoggerMap->find(name);
+    if (it->second.owned_by_shared_ptr)
+    {
+        it->second.logger->duplicate();
+        it->second.owned_by_shared_ptr = false;
+    }
+
+    return logger;
+}
+
+
+LoggerPtr Logger::getShared(const std::string & name)
+{
+    std::lock_guard<std::mutex> lock(getLoggerMutex());
+    bool logger_exists = _pLoggerMap && _pLoggerMap->contains(name);
+
+    Logger & logger = unsafeGet(name);
+
+    /** If logger already exists, then this shared pointer does not own it.
+      * If logger does not exists, logger infrastructure could be already destroyed
+      * or logger was created.
+      */
+    if (logger_exists)
+    {
+        logger.duplicate();
+    }
+    else if (_pLoggerMap)
+    {
+        _pLoggerMap->find(name)->second.owned_by_shared_ptr = true;
+    }
+
+    return makeLoggerPtr(logger);
 }
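makeLoggerPtr() above relies on std::shared_ptr's custom-deleter constructor: the shared_ptr adopts an object it did not allocate, and the deleter routes final cleanup through the object's own reference counting. A toy, self-contained sketch of the mechanism (names are illustrative, not the Poco API):

#include <cstdio>
#include <memory>

// Stand-in for an intrusively refcounted logger.
struct Widget
{
    int refs = 1;
    void release()
    {
        if (--refs == 0)
        {
            std::puts("widget destroyed");
            delete this;
        }
    }
};

int main()
{
    auto * raw = new Widget;                 // intrusive count starts at 1
    std::shared_ptr<Widget> owner(raw, [](Widget * w) { w->release(); });

    auto copy = owner;  // copies share one control block; Widget::refs unchanged
    copy.reset();       // control block count 2 -> 1, deleter not called yet
    owner.reset();      // last shared_ptr: deleter runs, Widget::release frees it
}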
@ -310,18 +413,24 @@ Logger& Logger::unsafeGet(const std::string& name)
|
|||||||
|
|
||||||
Logger& Logger::create(const std::string& name, Channel* pChannel, int level)
|
Logger& Logger::create(const std::string& name, Channel* pChannel, int level)
|
||||||
{
|
{
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
if (find(name)) throw ExistsException();
|
return unsafeCreate(name, pChannel, level);
|
||||||
Logger* pLogger = new Logger(name, pChannel, level);
|
|
||||||
add(pLogger);
|
|
||||||
return *pLogger;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
LoggerPtr Logger::createShared(const std::string & name, Channel * pChannel, int level)
|
||||||
|
{
|
||||||
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
|
Logger & logger = unsafeCreate(name, pChannel, level);
|
||||||
|
_pLoggerMap->find(name)->second.owned_by_shared_ptr = true;
|
||||||
|
|
||||||
|
return makeLoggerPtr(logger);
|
||||||
|
}
|
||||||
|
|
||||||
Logger& Logger::root()
|
Logger& Logger::root()
|
||||||
{
|
{
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
return unsafeGet(ROOT);
|
return unsafeGet(ROOT);
|
||||||
}
|
}
|
||||||
@ -329,7 +438,7 @@ Logger& Logger::root()
|
|||||||
|
|
||||||
Logger* Logger::has(const std::string& name)
|
Logger* Logger::has(const std::string& name)
|
||||||
{
|
{
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
return find(name);
|
return find(name);
|
||||||
}
|
}
|
||||||
@ -337,14 +446,18 @@ Logger* Logger::has(const std::string& name)
|
|||||||
|
|
||||||
void Logger::shutdown()
|
void Logger::shutdown()
|
||||||
{
|
{
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
if (_pLoggerMap)
|
if (_pLoggerMap)
|
||||||
{
|
{
|
||||||
for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
|
for (auto & it : *_pLoggerMap)
|
||||||
{
|
{
|
||||||
it->second->release();
|
if (it.second.owned_by_shared_ptr)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
it.second.logger->release();
|
||||||
}
|
}
|
||||||
|
|
||||||
delete _pLoggerMap;
|
delete _pLoggerMap;
|
||||||
_pLoggerMap = 0;
|
_pLoggerMap = 0;
|
||||||
}
|
}
|
||||||
@ -357,31 +470,15 @@ Logger* Logger::find(const std::string& name)
|
|||||||
{
|
{
|
||||||
LoggerMap::iterator it = _pLoggerMap->find(name);
|
LoggerMap::iterator it = _pLoggerMap->find(name);
|
||||||
if (it != _pLoggerMap->end())
|
if (it != _pLoggerMap->end())
|
||||||
return it->second;
|
return it->second.logger;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Logger::destroy(const std::string& name)
|
|
||||||
{
|
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
|
||||||
|
|
||||||
if (_pLoggerMap)
|
|
||||||
{
|
|
||||||
LoggerMap::iterator it = _pLoggerMap->find(name);
|
|
||||||
if (it != _pLoggerMap->end())
|
|
||||||
{
|
|
||||||
it->second->release();
|
|
||||||
_pLoggerMap->erase(it);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void Logger::names(std::vector<std::string>& names)
|
void Logger::names(std::vector<std::string>& names)
|
||||||
{
|
{
|
||||||
Mutex::ScopedLock lock(_mapMtx);
|
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||||
|
|
||||||
names.clear();
|
names.clear();
|
||||||
if (_pLoggerMap)
|
if (_pLoggerMap)
|
||||||
@ -393,6 +490,14 @@ void Logger::names(std::vector<std::string>& names)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Logger& Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level)
|
||||||
|
{
|
||||||
|
if (find(name)) throw ExistsException();
|
||||||
|
Logger* pLogger = new Logger(name, pChannel, level);
|
||||||
|
add(pLogger);
|
||||||
|
|
||||||
|
return *pLogger;
|
||||||
|
}
|
||||||
|
|
||||||
Logger& Logger::parent(const std::string& name)
|
Logger& Logger::parent(const std::string& name)
|
||||||
{
|
{
|
||||||
@ -478,7 +583,8 @@ void Logger::add(Logger* pLogger)
|
|||||||
{
|
{
|
||||||
if (!_pLoggerMap)
|
if (!_pLoggerMap)
|
||||||
_pLoggerMap = new LoggerMap;
|
_pLoggerMap = new LoggerMap;
|
||||||
_pLoggerMap->insert(LoggerMap::value_type(pLogger->name(), pLogger));
|
|
||||||
|
_pLoggerMap->emplace(pLogger->name(), LoggerEntry{pLogger, false /*owned_by_shared_ptr*/});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
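A note on the shutdown() change above: entries handed out via getShared()/createShared() are released by their LoggerDeleter, so shutdown() skipping them avoids a double release. A toy model of the invariant (illustrative only):

#include <cassert>
#include <map>
#include <string>

// Toy model of the ownership split: the registry owns plain entries and
// must NOT release entries whose lifetime was transferred to a shared_ptr.
struct Entry { int refs = 1; bool owned_by_shared_ptr = false; };

int main()
{
    std::map<std::string, Entry> registry;
    registry["plain"]  = Entry{1, false};  // registry holds the only reference
    registry["shared"] = Entry{1, true};   // a LoggerPtr-style deleter will drop this one

    // shutdown(): release only what the registry itself owns.
    for (auto & [name, entry] : registry)
    {
        if (entry.owned_by_shared_ptr)
            continue;           // its shared_ptr deleter is responsible
        --entry.refs;           // registry drops its reference
    }

    assert(registry["plain"].refs == 0);
    assert(registry["shared"].refs == 1);  // still alive for the shared owner
}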
@@ -2,11 +2,11 @@

 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54482)
+SET(VERSION_REVISION 54483)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 1)
+SET(VERSION_MINOR 2)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH a2faa65b080a587026c86844f3a20c74d23a86f8)
-SET(VERSION_DESCRIBE v24.1.1.1-testing)
-SET(VERSION_STRING 24.1.1.1)
+SET(VERSION_GITHASH 5a024dfc0936e062770d0cfaad0805b57c1fba17)
+SET(VERSION_DESCRIBE v24.2.1.1-testing)
+SET(VERSION_STRING 24.2.1.1)
 # end of autochange
@@ -79,7 +79,10 @@ if (SANITIZE_COVERAGE)

     # But the actual coverage will be enabled on per-library basis: for ClickHouse code, but not for 3rd-party.
     set (COVERAGE_FLAGS "-fsanitize-coverage=trace-pc-guard,pc-table")
-endif()

     set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table")
     set (WITHOUT_COVERAGE_FLAGS_LIST -fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table)
+else()
+    set (WITHOUT_COVERAGE_FLAGS "")
+    set (WITHOUT_COVERAGE_FLAGS_LIST "")
+endif()
@@ -1,8 +1,5 @@
 if (NOT ENABLE_LIBRARIES)
     set(DEFAULT_ENABLE_RUST FALSE)
-elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
-    message(STATUS "Rust is not available on aarch64-apple-darwin")
-    set(DEFAULT_ENABLE_RUST FALSE)
 else()
     list (APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
     find_package(Rust)

@@ -19,7 +16,9 @@ message(STATUS "Checking Rust toolchain for current target")

 # See https://doc.rust-lang.org/nightly/rustc/platform-support.html

-if((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
+if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
+    set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
+elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
     set(Rust_CARGO_TARGET "x86_64-unknown-linux-musl")
 elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64")
     set(Rust_CARGO_TARGET "x86_64-unknown-linux-gnu")

@@ -29,14 +28,14 @@ elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64")
     set(Rust_CARGO_TARGET "aarch64-unknown-linux-gnu")
 elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
     set(Rust_CARGO_TARGET "x86_64-apple-darwin")
+elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
+    set(Rust_CARGO_TARGET "aarch64-apple-darwin")
 elseif((CMAKE_TOOLCHAIN_FILE MATCHES "freebsd") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
     set(Rust_CARGO_TARGET "x86_64-unknown-freebsd")
 elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-riscv64")
     set(Rust_CARGO_TARGET "riscv64gc-unknown-linux-gnu")
-endif()
-
-if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
-    set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
+else()
+    message(FATAL_ERROR "Unsupported rust target")
 endif()

 message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")
@@ -1,4 +1,4 @@
-option (ENABLE_SSH "Enable support for SSH keys and protocol" ON)
+option (ENABLE_SSH "Enable support for SSH keys and protocol" ${ENABLE_LIBRARIES})

 if (NOT ENABLE_SSH)
     message(STATUS "Not using SSH")
2
contrib/simdjson
vendored
2
contrib/simdjson
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 1075e8609c4afa253162d441437af929c29e31bb
|
Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
|
@@ -22,7 +22,7 @@ RUN apt-get update \
     zstd \
     --yes --no-install-recommends

-RUN pip3 install numpy scipy pandas Jinja2
+RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3

 ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
@@ -23,13 +23,15 @@ if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
     > /sys/fs/cgroup/cgroup.subtree_control
 fi

-# In case of test hung it is convenient to use pytest --pdb to debug it,
-# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
-# but on SIGINT dockerd will exit, so ignore it to preserve the daemon.
-trap '' INT
 # Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed
 # unless --tls=false or --tlsverify=false is set
-dockerd --host=unix:///var/run/docker.sock --tls=false --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &
+#
+# In case of test hung it is convenient to use pytest --pdb to debug it,
+# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
+# but on SIGINT dockerd will exit, so we spawn new session to ignore SIGINT by
+# docker.
+# Note, that if you will run it via runner, it will send SIGINT to docker anyway.
+setsid dockerd --host=unix:///var/run/docker.sock --tls=false --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &

 set +e
 reties=0
@ -246,16 +246,19 @@ clickhouse-client -q "system flush logs" ||:
|
|||||||
stop_logs_replication
|
stop_logs_replication
|
||||||
|
|
||||||
# Try to get logs while server is running
|
# Try to get logs while server is running
|
||||||
successfuly_saved=0
|
failed_to_save_logs=0
|
||||||
for table in query_log zookeeper_log trace_log transactions_info_log
|
for table in query_log zookeeper_log trace_log transactions_info_log metric_log
|
||||||
do
|
do
|
||||||
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst
|
err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst; } 2>&1 )
|
||||||
successfuly_saved=$?
|
echo "$err"
|
||||||
|
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
|
||||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||||
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst
|
err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
|
||||||
successfuly_saved=$((successfuly_saved | $?))
|
echo "$err"
|
||||||
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst
|
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
|
||||||
successfuly_saved=$((successfuly_saved | $?))
|
err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
|
||||||
|
echo "$err"
|
||||||
|
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
@@ -280,7 +283,7 @@ fi


 # If server crashed dump system logs with clickhouse-local
-if [ $successfuly_saved -ne 0 ]; then
+if [ $failed_to_save_logs -ne 0 ]; then
     # Compress tables.
     #
     # NOTE:

@@ -288,7 +291,7 @@ if [ $successfuly_saved -ne 0 ]; then
     # directly
     # - even though ci auto-compress some files (but not *.tsv) it does this only
     # for files >64MB, we want this files to be compressed explicitly
-    for table in query_log zookeeper_log trace_log transactions_info_log
+    for table in query_log zookeeper_log trace_log transactions_info_log metric_log
     do
         clickhouse-local "$data_path_config" --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
         if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -78,6 +78,8 @@ function configure()
         randomize_config_boolean_value use_compression zookeeper
     fi

+    randomize_config_boolean_value allow_experimental_block_number_column block_number
+
     # for clickhouse-server (via service)
     echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
     # for clickhouse-client
@@ -122,6 +122,7 @@ rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
 rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
 rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
+rm /etc/clickhouse-server/config.d/block_number.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
 rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@@ -11,6 +11,7 @@ sidebar_label: 2023
 * Remove the `status_info` configuration option and dictionaries status from the default Prometheus handler. [#54090](https://github.com/ClickHouse/ClickHouse/pull/54090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * The experimental parts metadata cache is removed from the codebase. [#54215](https://github.com/ClickHouse/ClickHouse/pull/54215) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Disable setting `input_format_json_try_infer_numbers_from_strings` by default, so we don't try to infer numbers from strings in JSON formats by default to avoid possible parsing errors when sample data contains strings that looks like a number. [#55099](https://github.com/ClickHouse/ClickHouse/pull/55099) ([Kruglov Pavel](https://github.com/Avogar)).
+* IPv6 bloom filter indexes created prior to March 2023 are not compatible with current version and have to be rebuilt. [#54200](https://github.com/ClickHouse/ClickHouse/pull/54200) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).

 #### New Feature
 * Added new type of authentication based on SSH keys. It works only for Native TCP protocol. [#41109](https://github.com/ClickHouse/ClickHouse/pull/41109) ([George Gamezardashvili](https://github.com/InfJoker)).
@ -109,6 +109,9 @@ Do not check for a particular wording of error message, it may change in the fut
|
|||||||
|
|
||||||
If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`. Remember to add the words `shard` or `distributed` to the test name, so that it is run in CI in correct configurations, where the server is configured to support distributed queries.
|
If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`. Remember to add the words `shard` or `distributed` to the test name, so that it is run in CI in correct configurations, where the server is configured to support distributed queries.
|
||||||
|
|
||||||
|
### Working with Temporary Files
|
||||||
|
|
||||||
|
Sometimes in a shell test you may need to create a file on the fly to work with. Keep in mind that some CI checks run tests in parallel, so if you are creating or removing a temporary file in your script without a unique name this can cause some of the CI checks, such as Flaky, to fail. To get around this you should use environment variable `$CLICKHOUSE_TEST_UNIQUE_NAME` to give temporary files a name unique to the test that is running. That way you can be sure that the file you are creating during setup or removing during cleanup is the file only in use by that test and not some other test which is running in parallel.
|
||||||
|
|
||||||
## Known Bugs {#known-bugs}
|
## Known Bugs {#known-bugs}
|
||||||
|
|
||||||
|
@@ -2040,6 +2040,32 @@ SELECT * FROM test_table
 └───┘
 ```

+## update_insert_deduplication_token_in_dependent_materialized_views {#update-insert-deduplication-token-in-dependent-materialized-views}
+
+Allows to update `insert_deduplication_token` with table identifier during insert in dependent materialized views, if setting `deduplicate_blocks_in_dependent_materialized_views` is enabled and `insert_deduplication_token` is set.
+
+Possible values:
+
+0 — Disabled.
+1 — Enabled.
+
+Default value: 0.
+
+Usage:
+
+If setting `deduplicate_blocks_in_dependent_materialized_views` is enabled, `insert_deduplication_token` is passed to dependent materialized views. But in complex INSERT flows it is possible that we want to avoid deduplication for dependent materialized views.
+
+Example:
+```
+landing -┬--> mv_1_1 ---> ds_1_1 ---> mv_2_1 --┬-> ds_2_1 ---> mv_3_1 ---> ds_3_1
+         |                                     |
+         └--> mv_1_2 ---> ds_1_2 ---> mv_2_2 --┘
+```
+
+In this example we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will be inserted into `ds_2_1`. Without `update_insert_deduplication_token_in_dependent_materialized_views` setting enabled, those two different blocks will be deduplicated, because different blocks from `mv_2_1` and `mv_2_2` will have the same `insert_deduplication_token`.
+
+If setting `update_insert_deduplication_token_in_dependent_materialized_views` is enabled, during each insert into dependent materialized views `insert_deduplication_token` is updated with table identifier, so block from `mv_2_1` and block from `mv_2_2` will have different `insert_deduplication_token` and will not be deduplicated.
+
 ## insert_keeper_max_retries

 The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
@@ -5165,7 +5191,7 @@ SETTINGS(dictionary_use_async_executor=1, max_threads=8);
 ## storage_metadata_write_full_object_key {#storage_metadata_write_full_object_key}

 When set to `true` the metadata files are written with `VERSION_FULL_OBJECT_KEY` format version. With that format full object storage key names are written to the metadata files.
 When set to `false` the metadata files are written with the previous format version, `VERSION_INLINE_DATA`. With that format only suffixes of object storage key names are are written to the metadata files. The prefix for all of object storage key names is set in configurations files at `storage_configuration.disks` section.

 Default value: `false`.
@@ -5176,6 +5202,95 @@ When set to `false` than all attempts are made with identical timeouts.

 Default value: `true`.

+## allow_experimental_variant_type {#allow_experimental_variant_type}
+
+Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
+
+Default value: `false`.
+
+## use_variant_as_common_type {#use_variant_as_common_type}
+
+Allows to use `Variant` type as a result type for [if](../../sql-reference/functions/conditional-functions.md/#if)/[multiIf](../../sql-reference/functions/conditional-functions.md/#multiif)/[array](../../sql-reference/functions/array-functions.md)/[map](../../sql-reference/functions/tuple-map-functions.md) functions when there is no common type for argument types.
+
+Example:
+
+```sql
+SET use_variant_as_common_type = 1;
+SELECT toTypeName(if(number % 2, number, range(number))) as variant_type FROM numbers(1);
+SELECT if(number % 2, number, range(number)) as variant FROM numbers(5);
+```
+
+```text
+┌─variant_type───────────────────┐
+│ Variant(Array(UInt64), UInt64) │
+└────────────────────────────────┘
+┌─variant───┐
+│ []        │
+│ 1         │
+│ [0,1]     │
+│ 3         │
+│ [0,1,2,3] │
+└───────────┘
+```
+
+```sql
+SET use_variant_as_common_type = 1;
+SELECT toTypeName(multiIf((number % 4) = 0, 42, (number % 4) = 1, [1, 2, 3], (number % 4) = 2, 'Hello, World!', NULL)) AS variant_type FROM numbers(1);
+SELECT multiIf((number % 4) = 0, 42, (number % 4) = 1, [1, 2, 3], (number % 4) = 2, 'Hello, World!', NULL) AS variant FROM numbers(4);
+```
+
+```text
+┌─variant_type─────────────────────────┐
+│ Variant(Array(UInt8), String, UInt8) │
+└──────────────────────────────────────┘
+
+┌─variant───────┐
+│ 42            │
+│ [1,2,3]       │
+│ Hello, World! │
+│ ᴺᵁᴸᴸ          │
+└───────────────┘
+```
+
+```sql
+SET use_variant_as_common_type = 1;
+SELECT toTypeName(array(range(number), number, 'str_' || toString(number))) as array_of_variants_type from numbers(1);
+SELECT array(range(number), number, 'str_' || toString(number)) as array_of_variants FROM numbers(3);
+```
+
+```text
+┌─array_of_variants_type────────────────────────┐
+│ Array(Variant(Array(UInt64), String, UInt64)) │
+└───────────────────────────────────────────────┘
+
+┌─array_of_variants─┐
+│ [[],0,'str_0']    │
+│ [[0],1,'str_1']   │
+│ [[0,1],2,'str_2'] │
+└───────────────────┘
+```
+
+```sql
+SET use_variant_as_common_type = 1;
+SELECT toTypeName(map('a', range(number), 'b', number, 'c', 'str_' || toString(number))) as map_of_variants_type from numbers(1);
+SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as map_of_variants FROM numbers(3);
+```
+
+```text
+┌─map_of_variants_type────────────────────────────────┐
+│ Map(String, Variant(Array(UInt64), String, UInt64)) │
+└─────────────────────────────────────────────────────┘
+
+┌─map_of_variants───────────────┐
+│ {'a':[],'b':0,'c':'str_0'}    │
+│ {'a':[0],'b':1,'c':'str_1'}   │
+│ {'a':[0,1],'b':2,'c':'str_2'} │
+└───────────────────────────────┘
+```
+
+Default value: `false`.
+
 ## max_partition_size_to_drop

 Restriction on dropping partitions in query time.

@@ -5197,3 +5312,13 @@ The value 0 means that you can delete all tables without any restrictions.
 :::note
 This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
 :::
+
+## iceberg_engine_ignore_schema_evolution {#iceberg_engine_ignore_schema_evolution}
+
+Allow to ignore schema evolution in Iceberg table engine and read all data using schema specified by the user on table creation or latest schema parsed from metadata on table creation.
+
+:::note
+Enabling this setting can lead to incorrect result as in case of evolved schema all data files will be read using the same schema.
+:::
+
+Default value: 'false'.
@@ -287,7 +287,7 @@ Number of threads in the HashedDictionary thread pool running a task.

 ### IOPrefetchThreads

-Number of threads in the IO prefertch thread pool.
+Number of threads in the IO prefetch thread pool.

 ### IOPrefetchThreadsActive
@@ -88,7 +88,7 @@ ClickHouse-specific aggregate functions:
 - [quantileTDigestWeighted](/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
 - [quantileBFloat16](/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16)
 - [quantileBFloat16Weighted](/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16weighted)
-- [quantileDDSketch](/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch)
+- [quantileDD](/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch)
 - [simpleLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md)
 - [stochasticLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
 - [stochasticLogisticRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)

@@ -105,4 +105,3 @@ ClickHouse-specific aggregate functions:
 - [sparkBar](./sparkbar.md)
 - [sumCount](./sumcount.md)
 - [largestTriangleThreeBuckets](./largestTriangleThreeBuckets.md)
-

@@ -18,7 +18,7 @@ Functions:
 - `medianTDigest` — Alias for [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md#quantiletdigest).
 - `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md#quantiletdigestweighted).
 - `medianBFloat16` — Alias for [quantileBFloat16](../../../sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16).
-- `medianDDSketch` — Alias for [quantileDDSketch](../../../sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch).
+- `medianDD` — Alias for [quantileDD](../../../sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch).

 **Example**
@@ -1,10 +1,10 @@
 ---
 slug: /en/sql-reference/aggregate-functions/reference/quantileddsketch
 sidebar_position: 211
-title: quantileDDSketch
+title: quantileDD
 ---

-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample with relative-error guarantees. It works by building a [DDSketch](https://www.vldb.org/pvldb/vol12/p2195-masson.pdf).
+Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample with relative-error guarantees. It works by building a [DD](https://www.vldb.org/pvldb/vol12/p2195-masson.pdf).

 **Syntax**

@@ -44,13 +44,13 @@ Input table has an integer and a float columns:
 Query to calculate 0.75-quantile (third quartile):

 ``` sql
-SELECT quantileDDSketch(0.01, 0.75)(a), quantileDDSketch(0.01, 0.75)(b) FROM example_table;
+SELECT quantileDD(0.01, 0.75)(a), quantileDD(0.01, 0.75)(b) FROM example_table;
 ```

 Result:

 ``` text
-┌─quantileDDSketch(0.01, 0.75)(a)─┬─quantileDDSketch(0.01, 0.75)(b)─┐
+┌─quantileDD(0.01, 0.75)(a)─┬─quantileDD(0.01, 0.75)(b)─┐
 │         2.974233423476717 │                      1.01 │
 └───────────────────────────┴───────────────────────────┘
 ```
@@ -9,7 +9,7 @@ sidebar_position: 201

 Syntax: `quantiles(level1, level2, …)(x)`

-All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantileInterpolatedWeighted`, `quantilesTDigest`, `quantilesBFloat16`, `quantilesDDSketch`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
+All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantileInterpolatedWeighted`, `quantilesTDigest`, `quantilesBFloat16`, `quantilesDD`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.

 ## quantilesExactExclusive
@@ -7,7 +7,7 @@ sidebar_label: JSON
 # JSON

 :::note
-This feature is experimental and is not production ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead.
+This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead.
 :::

 Stores JavaScript Object Notation (JSON) documents in a single column.

@@ -15,7 +15,8 @@ Stores JavaScript Object Notation (JSON) documents in a single column.
 `JSON` is an alias for `Object('json')`.

 :::note
-The JSON data type is an experimental feature. To use it, set `allow_experimental_object_type = 1`.
+The JSON data type is an obsolete feature. Do not use it.
+If you want to use it, set `allow_experimental_object_type = 1`.
 :::

 ## Example
|
245
docs/en/sql-reference/data-types/variant.md
Normal file
245
docs/en/sql-reference/data-types/variant.md
Normal file
@ -0,0 +1,245 @@
|
|||||||
|
---
|
||||||
|
slug: /en/sql-reference/data-types/json
|
||||||
|
sidebar_position: 55
|
||||||
|
sidebar_label: Variant
|
||||||
|
---
|
||||||
|
|
||||||
|
# Variant(T1, T2, T3, ...)
|
||||||
|
|
||||||
|
This type represents a union of other data types. Type `Variant(T1, T2, ..., TN)` means that each row of this type
|
||||||
|
has a value of either type `T1` or `T2` or ... or `TN` or none of them (`NULL` value).
|
||||||
|
|
||||||
|
The order of nested types doesn't matter: Variant(T1, T2) = Variant(T2, T1).
|
||||||
|
Nested types can be arbitrary types except Nullable(...), LowCardinality(Nullable(...)) and Variant(...) types.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
The Variant data type is an experimental feature. To use it, set `allow_experimental_variant_type = 1`.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Creating Variant

Using the `Variant` type in a table column definition:

```sql
CREATE TABLE test (v Variant(UInt64, String, Array(UInt64))) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]);
SELECT v FROM test;
```

```text
┌─v─────────────┐
│ ᴺᵁᴸᴸ          │
│ 42            │
│ Hello, World! │
│ [1,2,3]       │
└───────────────┘
```

Using CAST from ordinary columns:

```sql
SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
```

```text
┌─type_name──────────────────────────────┬─variant───────┐
│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
└────────────────────────────────────────┴───────────────┘
```

Using functions `if/multiIf` when arguments don't have a common type (the setting `use_variant_as_common_type` should be enabled for it):

```sql
SET use_variant_as_common_type = 1;
SELECT if(number % 2, number, range(number)) as variant FROM numbers(5);
```

```text
┌─variant───┐
│ []        │
│ 1         │
│ [0,1]     │
│ 3         │
│ [0,1,2,3] │
└───────────┘
```

```sql
SET use_variant_as_common_type = 1;
SELECT multiIf((number % 4) = 0, 42, (number % 4) = 1, [1, 2, 3], (number % 4) = 2, 'Hello, World!', NULL) AS variant FROM numbers(4);
```

```text
┌─variant───────┐
│ 42            │
│ [1,2,3]       │
│ Hello, World! │
│ ᴺᵁᴸᴸ          │
└───────────────┘
```

Using functions `array/map` if array elements/map values don't have a common type (the setting `use_variant_as_common_type` should be enabled for it):

```sql
SET use_variant_as_common_type = 1;
SELECT array(range(number), number, 'str_' || toString(number)) as array_of_variants FROM numbers(3);
```

```text
┌─array_of_variants─┐
│ [[],0,'str_0']    │
│ [[0],1,'str_1']   │
│ [[0,1],2,'str_2'] │
└───────────────────┘
```

```sql
SET use_variant_as_common_type = 1;
SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as map_of_variants FROM numbers(3);
```

```text
┌─map_of_variants───────────────┐
│ {'a':[],'b':0,'c':'str_0'}    │
│ {'a':[0],'b':1,'c':'str_1'}   │
│ {'a':[0,1],'b':2,'c':'str_2'} │
└───────────────────────────────┘
```

## Reading Variant nested types as subcolumns

Variant type supports reading a single nested type from a Variant column using the type name as a subcolumn.
So, if you have a column `variant Variant(T1, T2, T3)`, you can read a subcolumn of type `T2` using the syntax `variant.T2`;
this subcolumn will have type `Nullable(T2)` if `T2` can be inside `Nullable`, and `T2` otherwise. This subcolumn will
be the same size as the original `Variant` column and will contain `NULL` values (or empty values if `T2` cannot be inside `Nullable`)
in all rows in which the original `Variant` column doesn't have type `T2`.

Variant subcolumns can also be read using the function `variantElement(variant_column, type_name)`.

Examples:

```sql
CREATE TABLE test (v Variant(UInt64, String, Array(UInt64))) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]);
SELECT v, v.String, v.UInt64, v.`Array(UInt64)` FROM test;
```

```text
┌─v─────────────┬─v.String──────┬─v.UInt64─┬─v.Array(UInt64)─┐
│ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ          │     ᴺᵁᴸᴸ │ []              │
│ 42            │ ᴺᵁᴸᴸ          │       42 │ []              │
│ Hello, World! │ Hello, World! │     ᴺᵁᴸᴸ │ []              │
│ [1,2,3]       │ ᴺᵁᴸᴸ          │     ᴺᵁᴸᴸ │ [1,2,3]         │
└───────────────┴───────────────┴──────────┴─────────────────┘
```

```sql
SELECT toTypeName(v.String), toTypeName(v.UInt64), toTypeName(v.`Array(UInt64)`) FROM test LIMIT 1;
```

```text
┌─toTypeName(v.String)─┬─toTypeName(v.UInt64)─┬─toTypeName(v.Array(UInt64))─┐
│ Nullable(String)     │ Nullable(UInt64)     │ Array(UInt64)               │
└──────────────────────┴──────────────────────┴─────────────────────────────┘
```

```sql
SELECT v, variantElement(v, 'String'), variantElement(v, 'UInt64'), variantElement(v, 'Array(UInt64)') FROM test;
```

```text
┌─v─────────────┬─variantElement(v, 'String')─┬─variantElement(v, 'UInt64')─┬─variantElement(v, 'Array(UInt64)')─┐
│ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ                        │                        ᴺᵁᴸᴸ │ []                                 │
│ 42            │ ᴺᵁᴸᴸ                        │                          42 │ []                                 │
│ Hello, World! │ Hello, World!               │                        ᴺᵁᴸᴸ │ []                                 │
│ [1,2,3]       │ ᴺᵁᴸᴸ                        │                        ᴺᵁᴸᴸ │ [1,2,3]                            │
└───────────────┴─────────────────────────────┴─────────────────────────────┴────────────────────────────────────┘
```

## Conversion between Variant column and other columns

There are 3 possible conversions that can be performed with a Variant column.

### Converting an ordinary column to a Variant column

It is possible to convert an ordinary column with type `T` to a `Variant` column containing this type:

```sql
SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
```

```text
┌─type_name──────────────────────────────┬─variant───────┐
│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
└────────────────────────────────────────┴───────────────┘
```

### Converting a Variant column to an ordinary column

It is possible to convert a `Variant` column to an ordinary column. In this case all nested variants will be converted to the destination type:

```sql
CREATE TABLE test (v Variant(UInt64, String)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), ('42.42');
SELECT v::Nullable(Float64) FROM test;
```

```text
┌─CAST(v, 'Nullable(Float64)')─┐
│                         ᴺᵁᴸᴸ │
│                           42 │
│                        42.42 │
└──────────────────────────────┘
```

### Converting a Variant to another Variant

It is possible to convert a `Variant` column to another `Variant` column, but only if the destination `Variant` column contains all nested types from the original `Variant`:

```sql
CREATE TABLE test (v Variant(UInt64, String)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), ('String');
SELECT v::Variant(UInt64, String, Array(UInt64)) FROM test;
```

```text
┌─CAST(v, 'Variant(UInt64, String, Array(UInt64))')─┐
│ ᴺᵁᴸᴸ                                              │
│ 42                                                │
│ String                                            │
└───────────────────────────────────────────────────┘
```

## Reading Variant type from the data

All text formats (TSV, CSV, CustomSeparated, Values, JSONEachRow, etc.) support reading the `Variant` type. During data parsing ClickHouse tries to insert the value into the most appropriate variant type.

Example:

```sql
SELECT
    v,
    variantElement(v, 'String') AS str,
    variantElement(v, 'UInt64') AS num,
    variantElement(v, 'Float64') AS float,
    variantElement(v, 'DateTime') AS date,
    variantElement(v, 'Array(UInt64)') AS arr
FROM format(JSONEachRow, 'v Variant(String, UInt64, Float64, DateTime, Array(UInt64))', $$
{"v" : "Hello, World!"},
{"v" : 42},
{"v" : 42.42},
{"v" : "2020-01-01 00:00:00"},
{"v" : [1, 2, 3]}
$$)
```

```text
┌─v───────────────────┬─str───────────┬──num─┬─float─┬────────────────date─┬─arr─────┐
│ Hello, World!       │ Hello, World! │ ᴺᵁᴸᴸ │  ᴺᵁᴸᴸ │                ᴺᵁᴸᴸ │ []      │
│ 42                  │ ᴺᵁᴸᴸ          │   42 │  ᴺᵁᴸᴸ │                ᴺᵁᴸᴸ │ []      │
│ 42.42               │ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ │ 42.42 │                ᴺᵁᴸᴸ │ []      │
│ 2020-01-01 00:00:00 │ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ │  ᴺᵁᴸᴸ │ 2020-01-01 00:00:00 │ []      │
│ [1,2,3]             │ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ │  ᴺᵁᴸᴸ │                ᴺᵁᴸᴸ │ [1,2,3] │
└─────────────────────┴───────────────┴──────┴───────┴─────────────────────┴─────────┘
```

@@ -1805,6 +1805,7 @@ Example of settings:

``` xml
    <source>
        <postgresql>
+            <host>postgresql-hostname</host>
            <port>5432</port>
            <user>clickhouse</user>
            <password>qwerty</password>
@@ -2832,6 +2832,43 @@ Result:

└─────────────────────────────────────────────────────────────────────────┘
```

+## variantElement
+
+Extracts a column with a specified type from a `Variant` column.
+
+**Syntax**
+
+```sql
+variantElement(variant, type_name[, default_value])
+```
+
+**Arguments**
+
+- `variant` — Variant column. [Variant](../../sql-reference/data-types/variant.md).
+- `type_name` — The name of the variant type to extract. [String](../../sql-reference/data-types/string.md).
+- `default_value` — The default value that will be used if the variant doesn't have a variant with the specified type. Can be any type. Optional.
+
+**Returned value**
+
+- Subcolumn of a `Variant` column with the specified type.
+
+**Example**
+
+```sql
+CREATE TABLE test (v Variant(UInt64, String, Array(UInt64))) ENGINE = Memory;
+INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]);
+SELECT v, variantElement(v, 'String'), variantElement(v, 'UInt64'), variantElement(v, 'Array(UInt64)') FROM test;
+```
+
+```text
+┌─v─────────────┬─variantElement(v, 'String')─┬─variantElement(v, 'UInt64')─┬─variantElement(v, 'Array(UInt64)')─┐
+│ ᴺᵁᴸᴸ          │ ᴺᵁᴸᴸ                        │                        ᴺᵁᴸᴸ │ []                                 │
+│ 42            │ ᴺᵁᴸᴸ                        │                          42 │ []                                 │
+│ Hello, World! │ Hello, World!               │                        ᴺᵁᴸᴸ │ []                                 │
+│ [1,2,3]       │ ᴺᵁᴸᴸ                        │                        ᴺᵁᴸᴸ │ [1,2,3]                            │
+└───────────────┴─────────────────────────────┴─────────────────────────────┴────────────────────────────────────┘
+```
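
The example above does not exercise the optional `default_value` argument. A minimal hedged sketch (editor's addition, not from the original docs; the expected behavior is inferred from the argument description, reusing the `test` table above):

```sql
SELECT v, variantElement(v, 'UInt64', 0::UInt64) AS num_or_default FROM test;
-- Rows whose variant is not UInt64 (NULL, 'Hello, World!', [1,2,3])
-- are expected to return the default 0 instead of NULL.
```
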

## minSampleSizeConversion

Calculates minimum required sample size for an A/B test comparing conversions (proportions) in two samples.

@@ -515,7 +515,7 @@ Alias: `concat_ws`

**Arguments**

- sep — separator. Const [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
-- exprN — expression to be concatenated. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
+- exprN — expression to be concatenated. Arguments which are not of types [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md) are converted to strings using their default serialization. As this decreases performance, it is not recommended to use non-String/FixedString arguments.
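
An illustrative query for the new behavior (editor's sketch, not from the original page; the result follows from the conversion rule just described):

```sql
SELECT concatWithSeparator('_', 'id', 42) AS s;
-- 'id_42': the numeric argument 42 is serialized to the string '42'.
```
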

**Returned values**

@@ -77,8 +77,8 @@ The number of data points in `series` should be at least twice the value of `period`.

**Returned value**

-- An array of three arrays where the first array include seasonal components, the second array - trend,
-and the third array - residue component.
+- An array of four arrays where the first array includes seasonal components, the second array - trend,
+the third array - residue component, and the fourth array - baseline (seasonal + trend) component.

Type: [Array](../../sql-reference/data-types/array.md).

@@ -107,6 +107,10 @@ Result:

[
    0, 0.0000019073486, -0.0000019073486, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0000019073486, 0,
    0
+],
+[
+    10.1, 20.449999, 40.340004, 10.100001, 20.45, 40.34, 10.100001, 20.45, 40.34, 10.1, 20.45, 40.34,
+    10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.100002, 20.45, 40.34
]] │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
@@ -8,8 +8,6 @@ sidebar_label: VIEW

You can modify a `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting the ingestion process.

-The `allow_experimental_alter_materialized_view_structure` setting must be enabled.
-
This command is created to change the materialized view created with the `TO [db.]name` clause. It does not change the structure of the underlying storage table and it does not change the columns' definition of the materialized view; because of this, the application of this command is very limited for materialized views created without the `TO [db.]name` clause.

**Example with TO table**
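
The example body itself is elided from this hunk. A minimal sketch of the statement being discussed (all object names are hypothetical):

```sql
ALTER TABLE mv_to_target MODIFY QUERY SELECT id, count() AS cnt FROM src GROUP BY id;
```
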
@@ -97,7 +97,7 @@ This feature is deprecated and will be removed in the future.

For your convenience, the old documentation is located [here](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md)

-## Refreshable Materialized View {#refreshable-materialized-view}
+## Refreshable Materialized View [Experimental] {#refreshable-materialized-view}

```sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name

@@ -120,7 +120,8 @@ Differences from regular non-refreshable materialized views:

:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
-* not compatible with Replicated database or table engines,
+* not compatible with Replicated database or table engines
+* It is not supported in ClickHouse Cloud
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no retries for failed refresh - we just skip to the next scheduled refresh time,
* no limit on number of concurrent refreshes.
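
For orientation, a rough sketch of creating a refreshable materialized view under the limitations listed above (editor's addition; all names are hypothetical, the experimental setting and an Atomic database are assumed, and clause order may differ in your version):

```sql
SET allow_experimental_refreshable_materialized_view = 1;

CREATE MATERIALIZED VIEW daily_totals
REFRESH EVERY 1 DAY
ENGINE = MergeTree ORDER BY day
AS SELECT toDate(event_time) AS day, count() AS hits
FROM events GROUP BY day;
```
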
@@ -9,10 +9,6 @@ sidebar_label: RENAME

Renames databases, tables, or dictionaries. Several entities can be renamed in a single query.
Note that the `RENAME` query with several entities is a non-atomic operation. To swap entity names atomically, use the [EXCHANGE](./exchange.md) statement.

-:::note
-The `RENAME` query is supported by the [Atomic](../../engines/database-engines/atomic.md) database engine only.
-:::
-
**Syntax**

```sql
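-- (The RENAME syntax block is elided in this hunk.)
-- As a companion to the EXCHANGE recommendation above, a minimal
-- editor's sketch of an atomic swap (table names are hypothetical):
--   EXCHANGE TABLES db.table_a AND db.table_b;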
@@ -7,7 +7,7 @@ keywords: [udf, user defined function, clickhouse, executable, table, function]

# executable Table Function for UDFs

-The `executable` table function creates a table based on the output of a user-defined function (UDF) that you define in a script that outputs rows to **stdout**. The executable script is stored in the `users_scripts` directory and can read data from any source.
+The `executable` table function creates a table based on the output of a user-defined function (UDF) that you define in a script that outputs rows to **stdout**. The executable script is stored in the `users_scripts` directory and can read data from any source. Make sure your ClickHouse server has all the required packages to run the executable script. For example, if it is a Python script, ensure that the server has the necessary Python packages installed.

You can optionally include one or more input queries that stream their results to **stdin** for the script to read.

@@ -20,7 +20,7 @@ public:
        const String & host_id_,
        const String & proxy_database_name_,
        ContextMutablePtr context_,
-        Poco::Logger * log_)
+        LoggerRawPtr log_)
        : WithMutableContext(context_),
        task_zookeeper_path(task_path_),
        host_id(host_id_),

@@ -230,7 +230,7 @@ private:

    bool experimental_use_sample_offset{false};

-    Poco::Logger * log;
+    LoggerRawPtr log;

    UInt64 max_table_tries = 3;
    UInt64 max_shard_partition_tries = 3;

@@ -177,7 +177,7 @@ public:
    auto watch_callback =
        [my_stale = stale] (const Coordination::WatchResponse & rsp)
        {
-            auto logger = &Poco::Logger::get("ClusterCopier");
+            auto logger = getLogger("ClusterCopier");
            if (rsp.error == Coordination::Error::ZOK)
            {
                switch (rsp.type)

@@ -160,7 +160,7 @@ int DisksApp::main(const std::vector<String> & /*args*/)
    }
    else
    {
-        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specifiged");
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specified");
    }

    if (config().has("save-logs"))

@@ -375,7 +375,7 @@ int KeeperClient::main(const std::vector<String> & /* args */)

    if (!config().has("host") && !config().has("port") && !keys.empty())
    {
-        LOG_INFO(&Poco::Logger::get("KeeperClient"), "Found keeper node in the config.xml, will use it for connection");
+        LOG_INFO(getLogger("KeeperClient"), "Found keeper node in the config.xml, will use it for connection");

        for (const auto & key : keys)
        {

@@ -28,7 +28,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
    po::store(po::command_line_parser(argc, argv).options(desc).run(), options);
    Poco::AutoPtr<Poco::ConsoleChannel> console_channel(new Poco::ConsoleChannel);

-    Poco::Logger * logger = &Poco::Logger::get("KeeperConverter");
+    LoggerPtr logger = getLogger("KeeperConverter");
    logger->setChannel(console_channel);

    if (options.count("help"))

@@ -624,7 +624,7 @@ catch (...)

void Keeper::logRevision() const
{
-    LOG_INFO(&Poco::Logger::get("Application"),
+    LOG_INFO(getLogger("Application"),
        "Starting ClickHouse Keeper {} (revision: {}, git hash: {}, build id: {}), PID {}",
        VERSION_STRING,
        ClickHouseRevision::getVersionRevision(),

@@ -13,7 +13,7 @@ CatBoostLibraryHandlerFactory & CatBoostLibraryHandlerFactory::instance()
}

CatBoostLibraryHandlerFactory::CatBoostLibraryHandlerFactory()
-    : log(&Poco::Logger::get("CatBoostLibraryHandlerFactory"))
+    : log(getLogger("CatBoostLibraryHandlerFactory"))
{
}

@@ -31,7 +31,7 @@ private:
    /// map: model path --> catboost library handler
    std::unordered_map<String, CatBoostLibraryHandlerPtr> library_handlers TSA_GUARDED_BY(mutex);
    std::mutex mutex;
-    Poco::Logger * log;
+    LoggerPtr log;
};

}
@@ -9,40 +9,40 @@ const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal";

void ExternalDictionaryLibraryAPI::log(LogLevel level, CString msg)
{
-    auto & logger = Poco::Logger::get(DICT_LOGGER_NAME);
+    auto logger = getLogger(DICT_LOGGER_NAME);
    switch (level)
    {
        case LogLevel::TRACE:
-            if (logger.trace())
-                logger.trace(msg);
+            if (logger->trace())
+                logger->trace(msg);
            break;
        case LogLevel::DEBUG:
-            if (logger.debug())
-                logger.debug(msg);
+            if (logger->debug())
+                logger->debug(msg);
            break;
        case LogLevel::INFORMATION:
-            if (logger.information())
-                logger.information(msg);
+            if (logger->information())
+                logger->information(msg);
            break;
        case LogLevel::NOTICE:
-            if (logger.notice())
-                logger.notice(msg);
+            if (logger->notice())
+                logger->notice(msg);
            break;
        case LogLevel::WARNING:
-            if (logger.warning())
-                logger.warning(msg);
+            if (logger->warning())
+                logger->warning(msg);
            break;
        case LogLevel::ERROR:
-            if (logger.error())
-                logger.error(msg);
+            if (logger->error())
+                logger->error(msg);
            break;
        case LogLevel::CRITICAL:
-            if (logger.critical())
-                logger.critical(msg);
+            if (logger->critical())
+                logger->critical(msg);
            break;
        case LogLevel::FATAL:
-            if (logger.fatal())
-                logger.fatal(msg);
+            if (logger->fatal())
+                logger->fatal(msg);
            break;
    }
}
@@ -26,7 +26,7 @@ void ExternalDictionaryLibraryHandlerFactory::create(

    if (library_handlers.contains(dictionary_id))
    {
-        LOG_WARNING(&Poco::Logger::get("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id);
+        LOG_WARNING(getLogger("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id);
        return;
    }

@@ -12,7 +12,7 @@ LibraryBridgeHandlerFactory::LibraryBridgeHandlerFactory(
    size_t keep_alive_timeout_,
    ContextPtr context_)
    : WithContext(context_)
-    , log(&Poco::Logger::get(name_))
+    , log(getLogger(name_))
    , name(name_)
    , keep_alive_timeout(keep_alive_timeout_)
{

@@ -19,7 +19,7 @@ public:
    std::unique_ptr<HTTPRequestHandler> createRequestHandler(const HTTPServerRequest & request) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;
    const std::string name;
    const size_t keep_alive_timeout;
};

@@ -47,7 +47,7 @@ namespace
        if (!response.sent())
            *response.send() << message << '\n';

-        LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
+        LOG_WARNING(getLogger("LibraryBridge"), fmt::runtime(message));
    }

    std::shared_ptr<Block> parseColumns(String && column_string)

@@ -92,7 +92,7 @@ static void writeData(Block data, OutputFormatPtr format)
ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
    : WithContext(context_)
    , keep_alive_timeout(keep_alive_timeout_)
-    , log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler"))
+    , log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler"))
{
}

@@ -380,7 +380,7 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
    : WithContext(context_)
    , keep_alive_timeout(keep_alive_timeout_)
-    , log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler"))
+    , log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler"))
{
}

@@ -419,7 +419,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler(
    size_t keep_alive_timeout_, ContextPtr context_)
    : WithContext(context_)
    , keep_alive_timeout(keep_alive_timeout_)
-    , log(&Poco::Logger::get("CatBoostLibraryBridgeRequestHandler"))
+    , log(getLogger("CatBoostLibraryBridgeRequestHandler"))
{
}

@@ -623,7 +623,7 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ
CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
    : WithContext(context_)
    , keep_alive_timeout(keep_alive_timeout_)
-    , log(&Poco::Logger::get("CatBoostLibraryBridgeExistsHandler"))
+    , log(getLogger("CatBoostLibraryBridgeExistsHandler"))
{
}

@@ -26,7 +26,7 @@ private:
    static constexpr inline auto FORMAT = "RowBinary";

    const size_t keep_alive_timeout;
-    Poco::Logger * log;
+    LoggerPtr log;
};

@@ -40,7 +40,7 @@ public:

private:
    const size_t keep_alive_timeout;
-    Poco::Logger * log;
+    LoggerPtr log;
};

@@ -69,7 +69,7 @@ public:

private:
    const size_t keep_alive_timeout;
-    Poco::Logger * log;
+    LoggerPtr log;
};

@@ -83,7 +83,7 @@ public:

private:
    const size_t keep_alive_timeout;
-    Poco::Logger * log;
+    LoggerPtr log;
};

}
@@ -221,7 +221,7 @@ void LocalServer::tryInitPath()
    {
        // The path is not provided explicitly - use a unique path in the system temporary directory
        // (or in the current dir if temporary don't exist)
-        Poco::Logger * log = &logger();
+        LoggerRawPtr log = &logger();
        std::filesystem::path parent_folder;
        std::filesystem::path default_path;

@@ -631,7 +631,7 @@ void LocalServer::processConfig()

    tryInitPath();

-    Poco::Logger * log = &logger();
+    LoggerRawPtr log = &logger();

    /// Maybe useless
    if (config().has("macros"))

@@ -18,7 +18,7 @@ class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext
public:
    ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_)
        : WithContext(context_)
-        , log(&Poco::Logger::get("ODBCColumnsInfoHandler"))
+        , log(getLogger("ODBCColumnsInfoHandler"))
        , keep_alive_timeout(keep_alive_timeout_)
    {
    }

@@ -26,7 +26,7 @@ public:
    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;
    size_t keep_alive_timeout;
};

@@ -16,7 +16,7 @@ class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext
public:
    IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_)
        : WithContext(context_)
-        , log(&Poco::Logger::get("IdentifierQuoteHandler"))
+        , log(getLogger("IdentifierQuoteHandler"))
        , keep_alive_timeout(keep_alive_timeout_)
    {
    }

@@ -24,7 +24,7 @@ public:
    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;
    size_t keep_alive_timeout;
};

@@ -24,7 +24,7 @@ public:
        ContextPtr context_,
        const String & mode_)
        : WithContext(context_)
-        , log(&Poco::Logger::get("ODBCHandler"))
+        , log(getLogger("ODBCHandler"))
        , keep_alive_timeout(keep_alive_timeout_)
        , mode(mode_)
    {

@@ -33,7 +33,7 @@ public:
    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;

    size_t keep_alive_timeout;
    String mode;

@@ -23,7 +23,7 @@ namespace ErrorCodes
ODBCSource::ODBCSource(
    nanodbc::ConnectionHolderPtr connection_holder, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
    : ISource(sample_block)
-    , log(&Poco::Logger::get("ODBCSource"))
+    , log(getLogger("ODBCSource"))
    , max_block_size{max_block_size_}
    , query(query_str)
{

@@ -30,7 +30,7 @@ private:
        column.insertFrom(sample_column, 0);
    }

-    Poco::Logger * log;
+    LoggerPtr log;
    const UInt64 max_block_size;
    ExternalResultDescription description;

@@ -19,7 +19,7 @@ ODBCSink::ODBCSink(
    ContextPtr local_context_,
    IdentifierQuotingStyle quoting_)
    : ISink(sample_block_)
-    , log(&Poco::Logger::get("ODBCSink"))
+    , log(getLogger("ODBCSink"))
    , connection_holder(std::move(connection_holder_))
    , db_name(remote_database_name_)
    , table_name(remote_table_name_)

@@ -30,7 +30,7 @@ protected:
    void consume(Chunk chunk) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;

    nanodbc::ConnectionHolderPtr connection_holder;
    std::string db_name;

@@ -11,7 +11,7 @@ namespace DB

ODBCBridgeHandlerFactory::ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_)
    : WithContext(context_)
-    , log(&Poco::Logger::get(name_))
+    , log(getLogger(name_))
    , name(name_)
    , keep_alive_timeout(keep_alive_timeout_)
{

@@ -22,7 +22,7 @@ public:
    std::unique_ptr<HTTPRequestHandler> createRequestHandler(const HTTPServerRequest & request) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;
    std::string name;
    size_t keep_alive_timeout;
};

@@ -97,7 +97,7 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::function<T(nanodb
    /// https://docs.microsoft.com/ru-ru/sql/odbc/reference/appendixes/appendix-a-odbc-error-codes?view=sql-server-ver15
    bool is_retriable = e.state().starts_with("08") || e.state().starts_with("24") || e.state().starts_with("25");
    LOG_ERROR(
-        &Poco::Logger::get("ODBCConnection"),
+        getLogger("ODBCConnection"),
        "ODBC query failed with error: {}, state: {}, native code: {}{}",
        e.what(), e.state(), e.native(), is_retriable ? ", will retry" : "");

@@ -19,7 +19,7 @@ class SchemaAllowedHandler : public HTTPRequestHandler, WithContext
public:
    SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_)
        : WithContext(context_)
-        , log(&Poco::Logger::get("SchemaAllowedHandler"))
+        , log(getLogger("SchemaAllowedHandler"))
        , keep_alive_timeout(keep_alive_timeout_)
    {
    }

@@ -27,7 +27,7 @@ public:
    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
-    Poco::Logger * log;
+    LoggerPtr log;
    size_t keep_alive_timeout;
};

@@ -26,7 +26,7 @@ std::string getIdentifierQuote(nanodbc::ConnectionHolderPtr connection_holder)
}
catch (...)
{
-    LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false));
+    LOG_WARNING(getLogger("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false));
    return "\"";
}

@@ -365,7 +365,7 @@ void Server::createServer(
namespace
{

-void setOOMScore(int value, Poco::Logger * log)
+void setOOMScore(int value, LoggerRawPtr log)
{
    try
    {

@@ -450,7 +450,7 @@ void checkForUsersNotInMainConfig(
    const Poco::Util::AbstractConfiguration & config,
    const std::string & config_path,
    const std::string & users_config_path,
-    Poco::Logger * log)
+    LoggerPtr log)
{
    if (config.getBool("skip_check_for_incorrect_settings", false))
        return;

@@ -1748,7 +1748,6 @@ try
    LOG_INFO(log, "Stopping AsyncLoader.");

    // Waits for all currently running jobs to finish and do not run any other pending jobs.
-    // Pending jobs will be canceled and destructed later by `load_metadata_tasks` dtor.
    global_context->getAsyncLoader().stop();
);

@@ -2491,7 +2490,7 @@ void Server::stopServers(
    const ServerType & server_type
) const
{
-    Poco::Logger * log = &logger();
+    LoggerRawPtr log = &logger();

    /// Remove servers once all their connections are closed
    auto check_server = [&log](const char prefix[], auto & server)

@@ -2530,7 +2529,7 @@ void Server::updateServers(
    std::vector<ProtocolServerAdapter> & servers,
    std::vector<ProtocolServerAdapter> & servers_to_start_before_tables)
{
-    Poco::Logger * log = &logger();
+    LoggerRawPtr log = &logger();

    const auto listen_hosts = getListenHosts(config);
    const auto interserver_listen_hosts = getInterserverListenHosts(config);
@@ -60,10 +60,16 @@
    /// If it is hosted on server, assume that it is the address of ClickHouse.
    if (location.protocol != 'file:') {
        host = location.origin;
-        user = 'default';
        add_http_cors_header = false;
    }

+    if (window.location.search) {
+        const params = new URLSearchParams(window.location.search);
+        if (params.has('host')) { host = params.get('host'); }
+        if (params.has('user')) { user = params.get('user'); }
+        if (params.has('password')) { password = params.get('password'); }
+    }
+
    let map = L.map('space', {
        crs: L.CRS.Simple,
        center: [-512, 512],

@@ -1562,6 +1562,10 @@
        <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
    </query_cache>

+    <backups>
+        <allowed_path>backups</allowed_path>
+    </backups>
+
    <!-- This allows to disable exposing addresses in stack traces for security reasons.
         Please be aware that it does not improve security much, but makes debugging much harder.
         The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.

@@ -5,6 +5,7 @@
    <title>ClickHouse Dashboard</title>
    <link rel="icon" href="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSI1NCIgaGVpZ2h0PSI0OCIgdmlld0JveD0iMCAwIDkgOCI+PHN0eWxlPi5ve2ZpbGw6I2ZjMH0ucntmaWxsOnJlZH08L3N0eWxlPjxwYXRoIGQ9Ik0wLDcgaDEgdjEgaC0xIHoiIGNsYXNzPSJyIi8+PHBhdGggZD0iTTAsMCBoMSB2NyBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNMiwwIGgxIHY4IGgtMSB6IiBjbGFzcz0ibyIvPjxwYXRoIGQ9Ik00LDAgaDEgdjggaC0xIHoiIGNsYXNzPSJvIi8+PHBhdGggZD0iTTYsMCBoMSB2OCBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNOCwzLjI1IGgxIHYxLjUgaC0xIHoiIGNsYXNzPSJvIi8+PC9zdmc+">
    <script src="https://cdn.jsdelivr.net/npm/uplot@1.6.21/dist/uPlot.iife.min.js"></script>
+    <script src="https://cdn.jsdelivr.net/npm/lz-string@1.5.0/libs/lz-string.min.js"></script>
    <style>
:root {
    --color: black;

@@ -484,7 +485,6 @@
 *
 * TODO:
 * - zoom on the graphs should work on touch devices;
-* - compress the state for URL's #hash;
 * - footer with "about" or a link to source code;
 * - allow to configure a table on a server to save the dashboards;
 * - if a query returned one value, display this value instead of a diagram;

@@ -1384,7 +1384,7 @@ document.getElementById('params').onsubmit = function(event) {
function saveState() {
    const state = { host, user, queries, params, search_query, customized };
    history.pushState(state, '',
-        window.location.pathname + (window.location.search || '') + '#' + btoa(JSON.stringify(state)));
+        window.location.pathname + (window.location.search || '') + '#' + LZString.compressToEncodedURIComponent(JSON.stringify(state)));
}

async function searchQueries() {

@@ -1450,7 +1450,7 @@ window.onpopstate = function(event) {
    if (window.location.hash) {
        try {
            let search_query_, customized_;
-            ({host, user, queries, params, search_query_, customized_} = JSON.parse(atob(window.location.hash.substring(1))));
+            ({host, user, queries, params, search_query_, customized_} = JSON.parse(LZString.decompressFromEncodedURIComponent(window.location.hash.substring(1))));
            // For compatibility with old URLs' hashes
            search_query = search_query_ !== undefined ? search_query_ : search_query;
            customized = customized_ !== undefined ? customized_ : true;
1	programs/server/js/lz-string.js	Normal file
@@ -0,0 +1 @@
var LZString=function(){var r=String.fromCharCode,o="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-$",e={};function t(r,o){if(!e[r]){e[r]={};for(var n=0;n<r.length;n++)e[r][r.charAt(n)]=n}return e[r][o]}var i={compressToBase64:function(r){if(null==r)return"";var n=i._compress(r,6,function(r){return o.charAt(r)});switch(n.length%4){default:case 0:return n;case 1:return n+"===";case 2:return n+"==";case 3:return n+"="}},decompressFromBase64:function(r){return null==r?"":""==r?null:i._decompress(r.length,32,function(n){return t(o,r.charAt(n))})},compressToUTF16:function(o){return null==o?"":i._compress(o,15,function(o){return r(o+32)})+" "},decompressFromUTF16:function(r){return null==r?"":""==r?null:i._decompress(r.length,16384,function(o){return r.charCodeAt(o)-32})},compressToUint8Array:function(r){for(var o=i.compress(r),n=new Uint8Array(2*o.length),e=0,t=o.length;e<t;e++){var s=o.charCodeAt(e);n[2*e]=s>>>8,n[2*e+1]=s%256}return n},decompressFromUint8Array:function(o){if(null==o)return i.decompress(o);for(var n=new Array(o.length/2),e=0,t=n.length;e<t;e++)n[e]=256*o[2*e]+o[2*e+1];var s=[];return n.forEach(function(o){s.push(r(o))}),i.decompress(s.join(""))},compressToEncodedURIComponent:function(r){return null==r?"":i._compress(r,6,function(r){return n.charAt(r)})},decompressFromEncodedURIComponent:function(r){return null==r?"":""==r?null:(r=r.replace(/ /g,"+"),i._decompress(r.length,32,function(o){return t(n,r.charAt(o))}))},compress:function(o){return i._compress(o,16,function(o){return r(o)})},_compress:function(r,o,n){if(null==r)return"";var e,t,i,s={},u={},a="",p="",c="",l=2,f=3,h=2,d=[],m=0,v=0;for(i=0;i<r.length;i+=1)if(a=r.charAt(i),Object.prototype.hasOwnProperty.call(s,a)||(s[a]=f++,u[a]=!0),p=c+a,Object.prototype.hasOwnProperty.call(s,p))c=p;else{if(Object.prototype.hasOwnProperty.call(u,c)){if(c.charCodeAt(0)<256){for(e=0;e<h;e++)m<<=1,v==o-1?(v=0,d.push(n(m)),m=0):v++;for(t=c.charCodeAt(0),e=0;e<8;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}else{for(t=1,e=0;e<h;e++)m=m<<1|t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t=0;for(t=c.charCodeAt(0),e=0;e<16;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}0==--l&&(l=Math.pow(2,h),h++),delete u[c]}else for(t=s[c],e=0;e<h;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;0==--l&&(l=Math.pow(2,h),h++),s[p]=f++,c=String(a)}if(""!==c){if(Object.prototype.hasOwnProperty.call(u,c)){if(c.charCodeAt(0)<256){for(e=0;e<h;e++)m<<=1,v==o-1?(v=0,d.push(n(m)),m=0):v++;for(t=c.charCodeAt(0),e=0;e<8;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}else{for(t=1,e=0;e<h;e++)m=m<<1|t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t=0;for(t=c.charCodeAt(0),e=0;e<16;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}0==--l&&(l=Math.pow(2,h),h++),delete u[c]}else for(t=s[c],e=0;e<h;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;0==--l&&(l=Math.pow(2,h),h++)}for(t=2,e=0;e<h;e++)m=m<<1|1&t,v==o-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;for(;;){if(m<<=1,v==o-1){d.push(n(m));break}v++}return d.join("")},decompress:function(r){return null==r?"":""==r?null:i._decompress(r.length,32768,function(o){return r.charCodeAt(o)})},_decompress:function(o,n,e){var t,i,s,u,a,p,c,l=[],f=4,h=4,d=3,m="",v=[],g={val:e(0),position:n,index:1};for(t=0;t<3;t+=1)l[t]=t;for(s=0,a=Math.pow(2,2),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;switch(s){case 
0:for(s=0,a=Math.pow(2,8),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;c=r(s);break;case 1:for(s=0,a=Math.pow(2,16),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;c=r(s);break;case 2:return""}for(l[3]=c,i=c,v.push(c);;){if(g.index>o)return"";for(s=0,a=Math.pow(2,d),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;switch(c=s){case 0:for(s=0,a=Math.pow(2,8),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;l[h++]=r(s),c=h-1,f--;break;case 1:for(s=0,a=Math.pow(2,16),p=1;p!=a;)u=g.val&g.position,g.position>>=1,0==g.position&&(g.position=n,g.val=e(g.index++)),s|=(u>0?1:0)*p,p<<=1;l[h++]=r(s),c=h-1,f--;break;case 2:return v.join("")}if(0==f&&(f=Math.pow(2,d),d++),l[c])m=l[c];else{if(c!==h)return null;m=i+i.charAt(0)}v.push(m),l[h++]=i+m.charAt(0),i=m,0==--f&&(f=Math.pow(2,d),d++)}}};return i}();"function"==typeof define&&define.amd?define(function(){return LZString}):"undefined"!=typeof module&&null!=module?module.exports=LZString:"undefined"!=typeof angular&&null!=angular&&angular.module("LZString",[]).factory("LZString",function(){return LZString});
@@ -993,7 +993,16 @@
function renderError(response)
{
    clear();
-    document.getElementById('error').innerText = response ? response : "No response.";
+
+    let message = response;
+    try {
+        let json = JSON.parse(response);
+        if (json.exception) {
+            message = json.exception;
+        }
+    } catch (e) {}
+
+    document.getElementById('error').innerText = message ? message : "No response.";
    document.getElementById('error').style.display = 'block';
    document.getElementById('logo-container').style.display = 'none';
}
@@ -13,7 +13,7 @@ serde_json = "1.0"
crate-type = ["staticlib"]

[profile.release]
-debug = true
+debug = false

[profile.release-thinlto]
inherits = "release"
@@ -17,7 +17,7 @@ cxx-build = "1.0.83"
crate-type = ["staticlib"]

[profile.release]
-debug = true
+debug = false

[profile.release-thinlto]
inherits = "release"
@@ -47,7 +47,7 @@ namespace
    const Poco::Util::AbstractConfiguration & config,
    const std::string & config_path,
    const std::string & users_config_path,
-    Poco::Logger * log)
+    LoggerPtr log)
{
    if (config.getBool("skip_check_for_incorrect_settings", false))
        return;

@@ -443,7 +443,7 @@ public:
        optimizeTree();
    }

-    void logTree(Poco::Logger * log, const String & title) const
+    void logTree(LoggerPtr log, const String & title) const
    {
        LOG_TRACE(log, "Tree({}): level={}, name={}, flags={}, min_flags={}, max_flags={}, num_children={}",
            title, level, node_name ? *node_name : "NULL", flags.toString(),

@@ -1158,7 +1158,7 @@ AccessRights AccessRights::getFullAccess()

void AccessRights::logTree() const
{
-    auto * log = &Poco::Logger::get("AccessRights");
+    auto log = getLogger("AccessRights");
    if (root)
    {
        root->logTree(log, "");
@ -73,7 +73,7 @@ namespace
|
|||||||
return checkPasswordDoubleSHA1MySQL(scramble, scrambled_password, Util::encodeDoubleSHA1(password_plaintext));
|
return checkPasswordDoubleSHA1MySQL(scramble, scrambled_password, Util::encodeDoubleSHA1(password_plaintext));
|
||||||
}
|
}
|
||||||
|
|
||||||
#if USE_SSL
|
#if USE_SSH
|
||||||
bool checkSshSignature(const std::vector<ssh::SSHKey> & keys, std::string_view signature, std::string_view original)
|
bool checkSshSignature(const std::vector<ssh::SSHKey> & keys, std::string_view signature, std::string_view original)
|
||||||
{
|
{
|
||||||
for (const auto & key: keys)
|
for (const auto & key: keys)
|
||||||
@@ -243,7 +243,7 @@ bool Authentication::areCredentialsValid(
             throw Authentication::Require<SSLCertificateCredentials>("ClickHouse X.509 Authentication");
 
         case AuthenticationType::SSH_KEY:
-#if USE_SSL
+#if USE_SSH
             return checkSshSignature(auth_data.getSSHKeys(), ssh_credentials->getSignature(), ssh_credentials->getOriginal());
 #else
             throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without OpenSSL");
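These `USE_SSL` to `USE_SSH` switches fix the build gating: the SSH-key authentication paths were guarded by the OpenSSL flag rather than by the flag for SSH support itself. A toy sketch of the gating pattern (illustrative only; the actual flag is defined by the build system in the generated config header):

    #include <stdexcept>
    #include <string_view>

    #ifndef USE_SSH
    #define USE_SSH 0 // normally defined in the generated config.h
    #endif

    bool checkSignature([[maybe_unused]] std::string_view signature,
                        [[maybe_unused]] std::string_view original)
    {
    #if USE_SSH
        // real key-based verification would go here
        return !signature.empty() && !original.empty();
    #else
        throw std::runtime_error("SSH is disabled: built without SSH support");
    #endif
    }

Note that the untouched `#else` message above still says "built without OpenSSL"; after this change the condition is SSH support, so that wording is stale, but the hunk does not modify it.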
@@ -320,7 +320,7 @@ std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
         }
         case AuthenticationType::SSH_KEY:
         {
-#if USE_SSL
+#if USE_SSH
             for (const auto & key : getSSHKeys())
                 node->children.push_back(std::make_shared<ASTPublicSSHKey>(key.getBase64(), key.getKeyType()));
 
@@ -353,7 +353,7 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
     /// For this type of authentication we have ASTPublicSSHKey as children for ASTAuthenticationData
     if (query.type && query.type == AuthenticationType::SSH_KEY)
     {
-#if USE_SSL
+#if USE_SSH
         AuthenticationData auth_data(*query.type);
         std::vector<ssh::SSHKey> keys;
 
@@ -514,7 +514,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
                 throw;
             /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
             LOG_WARNING(
-                &Poco::Logger::get("AddressPatterns"),
+                getLogger("AddressPatterns"),
                 "Failed to check if the allowed client hosts contain address {}. {}, code = {}",
                 client_address.toString(), e.displayText(), e.code());
             return false;
@@ -556,7 +556,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
                 throw;
             /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
             LOG_WARNING(
-                &Poco::Logger::get("AddressPatterns"),
+                getLogger("AddressPatterns"),
                 "Failed to check if the allowed client hosts contain address {}. {}, code = {}",
                 client_address.toString(), e.displayText(), e.code());
             return false;
@@ -298,7 +298,7 @@ void ContextAccess::setUser(const UserPtr & user_) const
     }
 
     user_name = user->getName();
-    trace_log = &Poco::Logger::get("ContextAccess (" + user_name + ")");
+    trace_log = getLogger("ContextAccess (" + user_name + ")");
 
     std::vector<UUID> current_roles, current_roles_with_admin_option;
     if (params.use_default_roles)
@@ -185,9 +185,10 @@ private:
 
     mutable std::atomic<bool> initialized = false; // can be removed after Bug 5504 is resolved
     mutable std::atomic<bool> user_was_dropped = false;
-    mutable std::atomic<Poco::Logger *> trace_log = nullptr;
 
     mutable std::mutex mutex;
+    /// TODO: Fix race
+    mutable LoggerPtr trace_log;
     mutable UserPtr user TSA_GUARDED_BY(mutex);
     mutable String user_name TSA_GUARDED_BY(mutex);
     mutable scope_guard subscription_for_user_change TSA_GUARDED_BY(mutex);
@@ -47,7 +47,7 @@ namespace
         }
 
 
-    AccessEntityPtr tryReadEntityFile(const String & file_path, Poco::Logger & log)
+    AccessEntityPtr tryReadEntityFile(const String & file_path, LoggerPtr log)
     {
         try
         {
@@ -55,7 +55,7 @@ namespace
         }
         catch (...)
         {
-            tryLogCurrentException(&log);
+            tryLogCurrentException(log);
             return nullptr;
         }
     }
@@ -378,7 +378,7 @@ void DiskAccessStorage::reloadAllAndRebuildLists()
             continue;
 
         const auto access_entity_file_path = getEntityFilePath(directory_path, id);
-        auto entity = tryReadEntityFile(access_entity_file_path, *getLogger());
+        auto entity = tryReadEntityFile(access_entity_file_path, getLogger());
         if (!entity)
             continue;
 
@@ -279,7 +279,7 @@ void ExternalAuthenticators::reset()
     resetImpl();
 }
 
-void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
+void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log)
 {
     std::lock_guard lock(mutex);
     resetImpl();
@@ -36,7 +36,7 @@ class ExternalAuthenticators
 {
 public:
     void reset();
-    void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);
+    void setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log);
 
     // The name and readiness of the credentials must be verified before calling these.
     bool checkLDAPCredentials(const String & server, const BasicCredentials & credentials,
@@ -328,7 +328,7 @@ void GSSAcceptorContext::initHandles()
     }
 }
 
-String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger * log)
+String GSSAcceptorContext::processToken(const String & input_token, LoggerPtr log)
 {
     std::lock_guard lock(gss_global_mutex);
 
@@ -455,7 +455,7 @@ void GSSAcceptorContext::initHandles()
 {
 }
 
-String GSSAcceptorContext::processToken(const String &, Poco::Logger *)
+String GSSAcceptorContext::processToken(const String &, LoggerPtr)
 {
     throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support");
 }
@@ -3,6 +3,7 @@
 #include "config.h"
 
 #include <Access/Credentials.h>
+#include <Common/Logger.h>
 #include <base/types.h>
 #include <memory>
 
@@ -42,7 +43,7 @@ public:
 
     const String & getRealm() const;
     bool isFailed() const;
-    MAYBE_NORETURN String processToken(const String & input_token, Poco::Logger * log);
+    MAYBE_NORETURN String processToken(const String & input_token, LoggerPtr log);
 
 private:
     void reset();
@@ -6,6 +6,7 @@
 #include <Backups/BackupEntriesCollector.h>
 #include <Common/Exception.h>
 #include <Common/quoteString.h>
+#include <Common/callOnce.h>
 #include <IO/WriteHelpers.h>
 #include <Interpreters/Context.h>
 #include <Poco/UUIDGenerator.h>
@@ -615,7 +616,7 @@ UUID IAccessStorage::generateRandomID()
 }
 
 
-void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const Poco::Logger * log_)
+void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const LoggerPtr log_)
 {
     std::unordered_map<UUID, size_t> positions_by_id;
     std::unordered_map<std::string_view, size_t> positions_by_type_and_name[static_cast<size_t>(AccessEntityType::MAX)];
@@ -671,12 +672,13 @@ void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, Ac
 }
 
 
-Poco::Logger * IAccessStorage::getLogger() const
+LoggerPtr IAccessStorage::getLogger() const
 {
-    Poco::Logger * ptr = log.load();
-    if (!ptr)
-        log.store(ptr = &Poco::Logger::get("Access(" + storage_name + ")"), std::memory_order_relaxed);
-    return ptr;
+    callOnce(log_initialized, [&] {
+        log = ::getLogger("Access(" + storage_name + ")");
+    });
+
+    return log;
 }
 
 
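The old lazy initialization published the logger through an atomic pointer and could, under a race, create it twice (benign for registry-interned raw Poco pointers, not for shared ownership). `callOnce`/`OnceFlag` make the initialization run exactly once. A self-contained sketch of the same pattern using the standard-library equivalents, on the assumption that `OnceFlag`/`callOnce` are thin wrappers over `std::once_flag`/`std::call_once`, as the names suggest:

    #include <memory>
    #include <mutex>
    #include <string>

    struct Logger { std::string name; };
    using LoggerPtr = std::shared_ptr<Logger>;

    class Storage
    {
    public:
        LoggerPtr getLogger() const
        {
            std::call_once(log_initialized, [&]
            {
                log = std::make_shared<Logger>(Logger{"Access(" + storage_name + ")"});
            });
            return log;
        }

    private:
        const std::string storage_name = "users.xml";
        mutable std::once_flag log_initialized; // replaces the atomic load/store dance
        mutable LoggerPtr log;
    };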
@@ -6,6 +6,7 @@
 #include <Parsers/IParser.h>
 #include <Parsers/parseIdentifierOrStringLiteral.h>
 #include <Common/SettingsChanges.h>
+#include <Common/callOnce.h>
 
 #include <atomic>
 #include <functional>
@@ -225,9 +226,9 @@ protected:
         SettingsChanges & settings) const;
     virtual bool isAddressAllowed(const User & user, const Poco::Net::IPAddress & address) const;
     static UUID generateRandomID();
-    Poco::Logger * getLogger() const;
+    LoggerPtr getLogger() const;
     static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); }
-    static void clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const Poco::Logger * log_);
+    static void clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const LoggerPtr log_);
     [[noreturn]] void throwNotFound(const UUID & id) const;
     [[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const;
     [[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type);
@@ -246,7 +247,9 @@ protected:
 
 private:
     const String storage_name;
-    mutable std::atomic<Poco::Logger *> log = nullptr;
+
+    mutable OnceFlag log_initialized;
+    mutable LoggerPtr log = nullptr;
 };
 
 
@@ -63,7 +63,7 @@ String KerberosInit::fmtError(krb5_error_code code) const
 
 void KerberosInit::init(const String & keytab_file, const String & principal, const String & cache_name)
 {
-    auto * log = &Poco::Logger::get("KerberosInit");
+    auto log = getLogger("KerberosInit");
     LOG_TRACE(log,"Trying to authenticate with Kerberos v5");
 
     krb5_error_code ret;
@@ -532,7 +532,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
 
         for (size_t i = 0; referrals[i]; ++i)
         {
-            LOG_WARNING(&Poco::Logger::get("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]);
+            LOG_WARNING(getLogger("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]);
         }
     }
 
@@ -91,7 +91,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_)
     catch (...)
     {
         tryLogCurrentException(
-            &Poco::Logger::get("RowPolicy"),
+            getLogger("RowPolicy"),
             String("Could not parse the condition ") + toString(filter_type) + " of row policy "
                 + backQuote(policy->getName()));
     }
@@ -37,7 +37,7 @@ SettingsAuthResponseParser::parse(const Poco::Net::HTTPResponse & response, std:
     }
     catch (...)
     {
-        LOG_INFO(&Poco::Logger::get("HTTPAuthentication"), "Failed to parse settings from authentication response. Skip it.");
+        LOG_INFO(getLogger("HTTPAuthentication"), "Failed to parse settings from authentication response. Skip it.");
     }
     return result;
 }
@@ -209,7 +209,7 @@ namespace
         }
         else if (has_ssh_keys)
         {
-#if USE_SSL
+#if USE_SSH
             user->auth_data = AuthenticationData{AuthenticationType::SSH_KEY};
 
             Poco::Util::AbstractConfiguration::Keys entries;
@@ -440,7 +440,7 @@ struct GroupArrayNodeGeneral : public GroupArrayNodeBase<GroupArrayNodeGeneral>
         return node;
     }
 
-    void insertInto(IColumn & column) { column.deserializeAndInsertFromArena(data()); }
+    void insertInto(IColumn & column) { std::ignore = column.deserializeAndInsertFromArena(data()); }
 };
 
 template <typename Node, bool has_sampler>
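`IColumn::deserializeAndInsertFromArena` returns the advanced arena position, and the `std::ignore =` assignments in this commit make discarding that return explicit (presumably the method is now marked `[[nodiscard]]`, or the build warns on unused results). A toy illustration of the idiom:

    #include <tuple>

    [[nodiscard]] const char * deserializeOne(const char * pos)
    {
        return pos + 1; // pretend one value was consumed
    }

    void insertLast(const char * pos)
    {
        // The caller only needs the side effect, so the new cursor is
        // discarded explicitly rather than triggering a warning.
        std::ignore = deserializeOne(pos);
    }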
@@ -10,12 +10,28 @@ struct Settings;
 
 void registerAggregateFunctionNothing(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("nothing", [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
-    {
-        assertNoParameters(name, parameters);
-
-        auto result_type = argument_types.empty() ? std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()) : argument_types.front();
-        return std::make_shared<AggregateFunctionNothing>(argument_types, parameters, result_type);
+    factory.registerFunction(NameAggregateFunctionNothing::name,
+        [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
+        {
+            assertNoParameters(name, parameters);
+            return std::make_shared<AggregateFunctionNothing>(argument_types, parameters);
+        });
+
+    factory.registerFunction(NameAggregateFunctionNothingNull::name,
+        [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
+        {
+            assertNoParameters(name, parameters);
+            return std::make_shared<AggregateFunctionNothingNull>(argument_types, parameters);
+        });
+
+    factory.registerFunction(NameAggregateFunctionNothingUInt64::name, {
+        [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
+        {
+            assertNoParameters(name, parameters);
+            return std::make_shared<AggregateFunctionNothingUInt64>(argument_types, parameters);
+        },
+        AggregateFunctionProperties{ .returns_default_when_only_null = true }
     });
 }
 
@@ -6,7 +6,8 @@
 #include <AggregateFunctions/IAggregateFunction.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
-#include "DataTypes/IDataType.h"
+#include <DataTypes/IDataType.h>
+#include <DataTypes/DataTypesNumber.h>
 
 
 namespace DB
@@ -18,20 +19,42 @@ namespace ErrorCodes
 extern const int INCORRECT_DATA;
 }
 
+/// Returns the same type as the first argument
+struct NameAggregateFunctionNothing { static constexpr auto name = "nothing"; };
+/// Always returns Nullable(Nothing)
+struct NameAggregateFunctionNothingNull { static constexpr auto name = "nothingNull"; };
+/// Always returns UInt64
+struct NameAggregateFunctionNothingUInt64 { static constexpr auto name = "nothingUInt64"; };
+
+template <typename Name> class AggregateFunctionNothingImpl;
+
+using AggregateFunctionNothing = AggregateFunctionNothingImpl<NameAggregateFunctionNothing>;
+using AggregateFunctionNothingNull = AggregateFunctionNothingImpl<NameAggregateFunctionNothingNull>;
+using AggregateFunctionNothingUInt64 = AggregateFunctionNothingImpl<NameAggregateFunctionNothingUInt64>;
+
+
 /** Aggregate function that takes arbitrary number of arbitrary arguments and does nothing.
   */
-class AggregateFunctionNothing final : public IAggregateFunctionHelper<AggregateFunctionNothing>
+template <typename Name>
+class AggregateFunctionNothingImpl final : public IAggregateFunctionHelper<AggregateFunctionNothingImpl<Name>>
 {
-public:
-    AggregateFunctionNothing(const DataTypes & arguments, const Array & params, const DataTypePtr & result_type_)
-        : IAggregateFunctionHelper<AggregateFunctionNothing>(arguments, params, result_type_) {}
-
-    String getName() const override
+    static DataTypePtr getReturnType(const DataTypes & arguments [[maybe_unused]])
     {
-        return "nothing";
+        if constexpr (std::is_same_v<Name, NameAggregateFunctionNothingUInt64>)
+            return std::make_shared<DataTypeUInt64>();
+        else if constexpr (std::is_same_v<Name, NameAggregateFunctionNothingNull>)
+            return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
+        return arguments.empty() ? std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()) : arguments.front();
     }
 
+public:
+    AggregateFunctionNothingImpl(const DataTypes & arguments, const Array & params)
+        : IAggregateFunctionHelper<AggregateFunctionNothingImpl<Name>>(arguments, params, getReturnType(arguments))
+    {
+    }
+
+    String getName() const override { return Name::name; }
+
     bool allocatesMemoryInArena() const override { return false; }
 
     void create(AggregateDataPtr __restrict) const override
@@ -75,7 +98,8 @@ public:
         [[maybe_unused]] char symbol;
         readChar(symbol, buf);
         if (symbol != '\0')
-            throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect state of aggregate function 'nothing', it should contain exactly one zero byte, while it is {}.", static_cast<UInt32>(symbol));
+            throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect state of aggregate function '{}', it should contain exactly one zero byte, while it is {}",
+                getName(), static_cast<UInt32>(symbol));
     }
 
     void insertResultInto(AggregateDataPtr __restrict, IColumn & to, Arena *) const override
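The refactoring replaces one class whose result type was injected by each caller with a single template dispatching on a tag type, so the SQL-visible name and the return type can no longer drift apart. A compilable toy version of the dispatch, with plain strings standing in for ClickHouse's DataTypePtr:

    #include <iostream>
    #include <string>
    #include <type_traits>

    struct NameNothing       { static constexpr auto name = "nothing"; };
    struct NameNothingNull   { static constexpr auto name = "nothingNull"; };
    struct NameNothingUInt64 { static constexpr auto name = "nothingUInt64"; };

    template <typename Name>
    struct NothingImpl
    {
        // Mirrors getReturnType() above: the tag type pins the result type.
        static std::string returnType(const std::string & first_argument_type)
        {
            if constexpr (std::is_same_v<Name, NameNothingUInt64>)
                return "UInt64";
            else if constexpr (std::is_same_v<Name, NameNothingNull>)
                return "Nullable(Nothing)";
            else
                return first_argument_type.empty() ? "Nullable(Nothing)"
                                                   : first_argument_type;
        }
    };

    int main()
    {
        std::cout << NothingImpl<NameNothingUInt64>::returnType("String") << '\n'; // UInt64
        std::cout << NothingImpl<NameNothing>::returnType("String") << '\n';       // String
    }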
@@ -31,7 +31,7 @@ namespace ErrorCodes
 
 template <typename> class QuantileTiming;
 template <typename> class QuantileGK;
-template <typename> class QuantileDDSketch;
+template <typename> class QuantileDD;
 
 /** Generic aggregate function for calculation of quantiles.
   * It depends on quantile calculation data structure. Look at Quantile*.h for various implementations.
@@ -64,7 +64,7 @@ private:
     using ColVecType = ColumnVectorOrDecimal<Value>;
 
     static constexpr bool returns_float = !(std::is_same_v<FloatReturnType, void>);
-    static constexpr bool is_quantile_ddsketch = std::is_same_v<Data, QuantileDDSketch<Value>>;
+    static constexpr bool is_quantile_ddsketch = std::is_same_v<Data, QuantileDD<Value>>;
     static_assert(!is_decimal<Value> || !returns_float);
 
     QuantileLevels<Float64> levels;
@@ -334,7 +334,7 @@ struct NameQuantilesBFloat16Weighted { static constexpr auto name = "quantilesBF
 struct NameQuantileGK { static constexpr auto name = "quantileGK"; };
 struct NameQuantilesGK { static constexpr auto name = "quantilesGK"; };
 
-struct NameQuantileDDSketch { static constexpr auto name = "quantileDDSketch"; };
-struct NameQuantilesDDSketch { static constexpr auto name = "quantilesDDSketch"; };
+struct NameQuantileDD { static constexpr auto name = "quantileDD"; };
+struct NameQuantilesDD { static constexpr auto name = "quantilesDD"; };
 
 }
@@ -1,5 +1,5 @@
 #include <AggregateFunctions/AggregateFunctionQuantile.h>
-#include <AggregateFunctions/QuantileDDSketch.h>
+#include <AggregateFunctions/QuantileDD.h>
 #include <AggregateFunctions/AggregateFunctionFactory.h>
 #include <AggregateFunctions/Helpers.h>
 #include <DataTypes/DataTypeDate.h>
@@ -19,8 +19,8 @@ namespace ErrorCodes
 namespace
 {
 
-template <typename Value, bool float_return> using FuncQuantileDDSketch = AggregateFunctionQuantile<Value, QuantileDDSketch<Value>, NameQuantileDDSketch, false, std::conditional_t<float_return, Float64, void>, false, true>;
-template <typename Value, bool float_return> using FuncQuantilesDDSketch = AggregateFunctionQuantile<Value, QuantileDDSketch<Value>, NameQuantilesDDSketch, false, std::conditional_t<float_return, Float64, void>, true, true>;
+template <typename Value, bool float_return> using FuncQuantileDD = AggregateFunctionQuantile<Value, QuantileDD<Value>, NameQuantileDD, false, std::conditional_t<float_return, Float64, void>, false, true>;
+template <typename Value, bool float_return> using FuncQuantilesDD = AggregateFunctionQuantile<Value, QuantileDD<Value>, NameQuantilesDD, false, std::conditional_t<float_return, Float64, void>, true, true>;
 
 
 template <template <typename, bool> class Function>
@@ -46,16 +46,16 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
 
 }
 
-void registerAggregateFunctionsQuantileDDSketch(AggregateFunctionFactory & factory)
+void registerAggregateFunctionsQuantileDD(AggregateFunctionFactory & factory)
 {
     /// For aggregate functions returning array we cannot return NULL on empty set.
     AggregateFunctionProperties properties = { .returns_default_when_only_null = true };
 
-    factory.registerFunction(NameQuantileDDSketch::name, createAggregateFunctionQuantile<FuncQuantileDDSketch>);
-    factory.registerFunction(NameQuantilesDDSketch::name, { createAggregateFunctionQuantile<FuncQuantilesDDSketch>, properties });
+    factory.registerFunction(NameQuantileDD::name, createAggregateFunctionQuantile<FuncQuantileDD>);
+    factory.registerFunction(NameQuantilesDD::name, { createAggregateFunctionQuantile<FuncQuantilesDD>, properties });
 
     /// 'median' is an alias for 'quantile'
-    factory.registerAlias("medianDDSketch", NameQuantileDDSketch::name);
+    factory.registerAlias("medianDD", NameQuantileDD::name);
 }
 
 }
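Because the SQL-visible name is carried by the `Name*` tag structs, renaming `quantileDDSketch` to `quantileDD` only touches the tags, the `using` aliases, and the registration; the sketch algorithm itself is untouched. A toy model of the register-plus-alias step (hypothetical map-based factory, not the real `AggregateFunctionFactory` API):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    struct NameQuantileDD { static constexpr auto name = "quantileDD"; };

    int main()
    {
        std::map<std::string, std::function<double()>> factory;
        factory[NameQuantileDD::name] = [] { return 0.5; };  // registerFunction
        factory["medianDD"] = factory[NameQuantileDD::name]; // registerAlias
        std::cout << factory["medianDD"]() << '\n';
    }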
@@ -19,6 +19,7 @@
 #include <Common/assert_cast.h>
 
 #include <AggregateFunctions/IAggregateFunction.h>
+#include <AggregateFunctions/KeyHolderHelpers.h>
 
 
 namespace DB
@@ -153,8 +154,6 @@ private:
     UInt64 threshold;
     UInt64 reserved;
 
-    static void deserializeAndInsert(StringRef str, IColumn & data_to);
-
 public:
     AggregateFunctionTopKGeneric(
         UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params)
@@ -251,12 +250,7 @@ public:
         offsets_to.push_back(offsets_to.back() + result_vec.size());
 
         for (auto & elem : result_vec)
-        {
-            if constexpr (is_plain_column)
-                data_to.insertData(elem.key.data, elem.key.size);
-            else
-                data_to.deserializeAndInsertFromArena(elem.key.data);
-        }
+            deserializeAndInsert<is_plain_column>(elem.key, data_to);
     }
 };
 
@@ -111,9 +111,9 @@ public:
          * To address this, we handle `nothing` in a special way in `FunctionNode::toASTImpl`.
          */
         if (properties.returns_default_when_only_null)
-            return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeUInt64>());
+            return std::make_shared<AggregateFunctionNothingUInt64>(arguments, params);
         else
-            return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()));
+            return std::make_shared<AggregateFunctionNothingNull>(arguments, params);
     }
 
     assert(nested_function);
@@ -29,7 +29,7 @@ static void deserializeAndInsert(StringRef str, IColumn & data_to)
     if constexpr (is_plain_column)
         data_to.insertData(str.data, str.size);
     else
-        data_to.deserializeAndInsertFromArena(str.data);
+        std::ignore = data_to.deserializeAndInsertFromArena(str.data);
 }
 
 }
@@ -33,14 +33,14 @@ namespace DB
   */
 
 template <typename Value>
-class QuantileDDSketch
+class QuantileDD
 {
 public:
     using Weight = UInt64;
 
-    QuantileDDSketch() = default;
+    QuantileDD() = default;
 
-    explicit QuantileDDSketch(Float64 relative_accuracy) : data(relative_accuracy) { }
+    explicit QuantileDD(Float64 relative_accuracy) : data(relative_accuracy) { }
 
     void add(const Value & x)
     {
@@ -53,7 +53,7 @@ public:
         data.add(x, w);
     }
 
-    void merge(const QuantileDDSketch &other)
+    void merge(const QuantileDD &other)
     {
         data.merge(other.data);
     }
@@ -31,7 +31,7 @@ void registerAggregateFunctionsQuantileTimingWeighted(AggregateFunctionFactory &
 void registerAggregateFunctionsQuantileTDigest(AggregateFunctionFactory &);
 void registerAggregateFunctionsQuantileTDigestWeighted(AggregateFunctionFactory &);
 void registerAggregateFunctionsQuantileBFloat16(AggregateFunctionFactory &);
-void registerAggregateFunctionsQuantileDDSketch(AggregateFunctionFactory &);
+void registerAggregateFunctionsQuantileDD(AggregateFunctionFactory &);
 void registerAggregateFunctionsQuantileBFloat16Weighted(AggregateFunctionFactory &);
 void registerAggregateFunctionsQuantileApprox(AggregateFunctionFactory &);
 void registerAggregateFunctionsSequenceMatch(AggregateFunctionFactory &);
@@ -128,7 +128,7 @@ void registerAggregateFunctions()
         registerAggregateFunctionsQuantileTDigest(factory);
         registerAggregateFunctionsQuantileTDigestWeighted(factory);
         registerAggregateFunctionsQuantileBFloat16(factory);
-        registerAggregateFunctionsQuantileDDSketch(factory);
+        registerAggregateFunctionsQuantileDD(factory);
         registerAggregateFunctionsQuantileBFloat16Weighted(factory);
         registerAggregateFunctionsQuantileApprox(factory);
         registerAggregateFunctionsSequenceMatch(factory);
@@ -210,18 +210,6 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const
     function_ast->name = function_name;
     function_ast->nulls_action = nulls_action;
 
-    if (function_name == "nothing")
-    {
-        /** Inside AggregateFunctionCombinatorNull we may replace functions with `NULL` in arguments with `nothing`.
-          * Result type of `nothing` depends on `returns_default_when_only_null` property of nested function.
-          * If we convert `nothing` to AST, we will lose this information, so we use original function name instead.
-          */
-        const auto & original_ast = getOriginalAST();
-        const auto & original_function_ast = original_ast ? original_ast->as<ASTFunction>() : nullptr;
-        if (original_function_ast)
-            function_ast->name = original_function_ast->name;
-    }
-
     if (isWindowFunction())
     {
         function_ast->is_window_function = true;
@@ -181,6 +181,23 @@ public:
 
             node = std::make_shared<ColumnNode>(column, column_source);
         }
+        else if (function_name == "variantElement" && isVariant(column_type) && second_argument_constant_node)
+        {
+            /// Replace `variantElement(variant_argument, type_name)` with `variant_argument.type_name`.
+            const auto & variant_element_constant_value = second_argument_constant_node->getValue();
+            String subcolumn_name;
+
+            if (variant_element_constant_value.getType() != Field::Types::String)
+                return;
+
+            subcolumn_name = variant_element_constant_value.get<const String &>();
+
+            column.name += '.';
+            column.name += subcolumn_name;
+            column.type = function_node->getResultType();
+
+            node = std::make_shared<ColumnNode>(column, column_source);
+        }
         else if (function_name == "mapContains" && column_type.isMap())
         {
             const auto & data_type_map = assert_cast<const DataTypeMap &>(*column.type);
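The new branch rewrites `variantElement(v, 'TypeName')` with a constant string argument into a direct read of the `v.TypeName` subcolumn, skipping the function call entirely. A toy model of just the name/type rewrite (the real pass operates on query-tree nodes and takes the result type from the function node):

    #include <iostream>
    #include <optional>
    #include <string>

    struct ColumnRef { std::string name; std::string type; };

    std::optional<ColumnRef> rewriteVariantElement(ColumnRef column, const std::string & requested_type)
    {
        if (requested_type.empty()) // the pass bails out unless the argument is a constant string
            return std::nullopt;
        column.name += '.';
        column.name += requested_type;                    // v -> v.String
        column.type = "Nullable(" + requested_type + ")"; // illustrative result type
        return column;
    }

    int main()
    {
        if (auto rewritten = rewriteVariantElement({"v", "Variant(String, UInt64)"}, "String"))
            std::cout << rewritten->name << " : " << rewritten->type << '\n';
    }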
src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp (new file, 124 lines)
@@ -0,0 +1,124 @@
+#include <Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h>
+#include <Analyzer/ConstantNode.h>
+#include <Analyzer/FunctionNode.h>
+#include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/IQueryTreeNode.h>
+#include <DataTypes/IDataType.h>
+#include <Interpreters/ExternalDictionariesLoader.h>
+
+namespace DB
+{
+
+namespace
+{
+
+const std::unordered_set<String> possibly_injective_function_names
+{
+    "dictGet",
+    "dictGetString",
+    "dictGetUInt8",
+    "dictGetUInt16",
+    "dictGetUInt32",
+    "dictGetUInt64",
+    "dictGetInt8",
+    "dictGetInt16",
+    "dictGetInt32",
+    "dictGetInt64",
+    "dictGetFloat32",
+    "dictGetFloat64",
+    "dictGetDate",
+    "dictGetDateTime"
+};
+
+class OptimizeGroupByInjectiveFunctionsVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeGroupByInjectiveFunctionsVisitor>
+{
+    using Base = InDepthQueryTreeVisitorWithContext<OptimizeGroupByInjectiveFunctionsVisitor>;
+public:
+    explicit OptimizeGroupByInjectiveFunctionsVisitor(ContextPtr context)
+        : Base(std::move(context))
+    {}
+
+    void enterImpl(QueryTreeNodePtr & node)
+    {
+        if (!getSettings().optimize_injective_functions_in_group_by)
+            return;
+
+        auto * query = node->as<QueryNode>();
+        if (!query)
+            return;
+
+        if (!query->hasGroupBy())
+            return;
+
+        if (query->isGroupByWithCube() || query->isGroupByWithRollup())
+            return;
+
+        auto & group_by = query->getGroupBy().getNodes();
+        if (query->isGroupByWithGroupingSets())
+        {
+            for (auto & set : group_by)
+            {
+                auto & grouping_set = set->as<ListNode>()->getNodes();
+                optimizeGroupingSet(grouping_set);
+            }
+        }
+        else
+            optimizeGroupingSet(group_by);
+    }
+
+private:
+    void optimizeGroupingSet(QueryTreeNodes & grouping_set)
+    {
+        auto context = getContext();
+
+        QueryTreeNodes new_group_by_keys;
+        new_group_by_keys.reserve(grouping_set.size());
+        for (auto & group_by_elem : grouping_set)
+        {
+            std::queue<QueryTreeNodePtr> nodes_to_process;
+            nodes_to_process.push(group_by_elem);
+
+            while (!nodes_to_process.empty())
+            {
+                auto node_to_process = nodes_to_process.front();
+                nodes_to_process.pop();
+
+                auto const * function_node = node_to_process->as<FunctionNode>();
+                if (!function_node)
+                {
+                    // Constant aggregation keys are removed in PlannerExpressionAnalysis.cpp
+                    new_group_by_keys.push_back(node_to_process);
+                    continue;
+                }
+
+                // Aggregate functions are not allowed in GROUP BY clause
+                auto function = function_node->getFunctionOrThrow();
+                bool can_be_eliminated = function->isInjective(function_node->getArgumentColumns());
+
+                if (can_be_eliminated)
+                {
+                    for (auto const & argument : function_node->getArguments())
+                    {
+                        // We can skip constants here because aggregation key is already not a constant.
+                        if (argument->getNodeType() != QueryTreeNodeType::CONSTANT)
+                            nodes_to_process.push(argument);
+                    }
+                }
+                else
+                    new_group_by_keys.push_back(node_to_process);
+            }
+        }
+
+        grouping_set = std::move(new_group_by_keys);
+    }
+};
+
+}
+
+void OptimizeGroupByInjectiveFunctionsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
+{
+    OptimizeGroupByInjectiveFunctionsVisitor visitor(std::move(context));
+    visitor.visit(query_tree_node);
+}
+
+}
src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+#pragma once
+
+#include <Analyzer/IQueryTreePass.h>
+
+namespace DB
+{
+
+/* Eliminates injective functions in GROUP BY section.
+ */
+class OptimizeGroupByInjectiveFunctionsPass final : public IQueryTreePass
+{
+public:
+    String getName() override { return "OptimizeGroupByInjectiveFunctionsPass"; }
+
+    String getDescription() override { return "Replaces injective functions with their arguments in GROUP BY section."; }
+
+    void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
+};
+
+}
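The pass relies on the fact that wrapping a grouping key in an injective function cannot change which rows group together, so the wrapper can be peeled off and its (non-constant) arguments used as keys instead; the BFS handles nested wrappers. A compilable toy version of `optimizeGroupingSet` (injectivity decided by a hardcoded name list here; the real pass asks `IFunction::isInjective`):

    #include <iostream>
    #include <memory>
    #include <queue>
    #include <string>
    #include <unordered_set>
    #include <vector>

    struct Node
    {
        std::string name;                        // function or column name
        bool is_constant = false;
        std::vector<std::shared_ptr<Node>> args; // empty => leaf
    };

    std::vector<std::shared_ptr<Node>> optimizeGroupingSet(const std::vector<std::shared_ptr<Node>> & keys)
    {
        static const std::unordered_set<std::string> injective{"toString", "negate", "hex"};
        std::vector<std::shared_ptr<Node>> new_keys;
        for (const auto & key : keys)
        {
            std::queue<std::shared_ptr<Node>> to_process;
            to_process.push(key);
            while (!to_process.empty())
            {
                auto node = to_process.front();
                to_process.pop();
                if (!node->args.empty() && injective.contains(node->name))
                {
                    for (const auto & arg : node->args)
                        if (!arg->is_constant) // constants cannot affect grouping
                            to_process.push(arg);
                }
                else
                    new_keys.push_back(node);
            }
        }
        return new_keys;
    }

    int main()
    {
        auto col = std::make_shared<Node>(Node{"user_id", false, {}});
        auto key = std::make_shared<Node>(Node{"toString", false, {col}});
        for (const auto & k : optimizeGroupingSet({key}))
            std::cout << k->name << '\n'; // prints: user_id
    }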
Some files were not shown because too many files have changed in this diff.