Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)
Commit 1b1e4970ab: Merge branch 'master' of github.com:ClickHouse/ClickHouse into docs/CLICKHOUSEDOCS-548-adopters
@ -52,7 +52,7 @@ IncludeCategories:
ReflowComments: false
AlignEscapedNewlinesLeft: false
AlignEscapedNewlines: DontAlign
AlignTrailingComments: true
AlignTrailingComments: false

# Not changed:
AccessModifierOffset: -4
@ -12,6 +12,3 @@ ClickHouse is an open-source column-oriented database management system that all
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person.

## Upcoming Events

* [ClickHouse Meetup in Athens](https://www.meetup.com/Athens-Big-Data/events/268379195/) on March 5.
@ -3,6 +3,7 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <functional>

namespace
{

@ -18,18 +19,31 @@ void trim(String & s)
ReplxxLineReader::ReplxxLineReader(const Suggest & suggest, const String & history_file_path_, char extender_, char delimiter_)
: LineReader(history_file_path_, extender_, delimiter_)
{
using namespace std::placeholders;
using Replxx = replxx::Replxx;

if (!history_file_path.empty())
rx.history_load(history_file_path);

auto callback = [&suggest] (const String & context, size_t context_size)
{
auto range = suggest.getCompletions(context, context_size);
return replxx::Replxx::completions_t(range.first, range.second);
return Replxx::completions_t(range.first, range.second);
};

rx.set_completion_callback(callback);
rx.set_complete_on_empty(false);
rx.set_word_break_characters(word_break_characters);

/// By default C-p/C-n are bound to COMPLETE_NEXT/COMPLETE_PREV;
/// bind C-p/C-n to history-previous/history-next like readline does.
rx.bind_key(Replxx::KEY::control('N'), std::bind(&Replxx::invoke, &rx, Replxx::ACTION::HISTORY_NEXT, _1));
rx.bind_key(Replxx::KEY::control('P'), std::bind(&Replxx::invoke, &rx, Replxx::ACTION::HISTORY_PREVIOUS, _1));
/// By default COMPLETE_NEXT/COMPLETE_PREV were bound to C-p/C-n; re-bind them
/// to M-N/M-P (which were previously used for HISTORY_COMMON_PREFIX_SEARCH, but
/// that was also bound to M-p/M-n).
rx.bind_key(Replxx::KEY::meta('N'), std::bind(&Replxx::invoke, &rx, Replxx::ACTION::COMPLETE_NEXT, _1));
rx.bind_key(Replxx::KEY::meta('P'), std::bind(&Replxx::invoke, &rx, Replxx::ACTION::COMPLETE_PREVIOUS, _1));
}

ReplxxLineReader::~ReplxxLineReader()
@ -80,7 +80,6 @@ dumpImpl(Out & out, T && x)
}


/// Tuple, pair
template <size_t N, typename Out, typename T>
Out & dumpTupleImpl(Out & out, T && x)

@ -88,10 +88,14 @@ using signal_function = void(int, siginfo_t*, void*);

static void writeSignalIDtoSignalPipe(int sig)
{
auto saved_errno = errno; /// We must restore previous value of errno in signal handler.

char buf[buf_size];
DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
DB::writeBinary(sig, out);
out.next();

errno = saved_errno;
}

/** Signal handler for HUP / USR1 */

@ -110,6 +114,8 @@ static void terminateRequestedSignalHandler(int sig, siginfo_t * info, void * co
*/
static void signalHandler(int sig, siginfo_t * info, void * context)
{
auto saved_errno = errno; /// We must restore previous value of errno in signal handler.

char buf[buf_size];
DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], buf_size, buf);

@ -134,6 +140,8 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
::sleep(10);
call_default_signal_handler(sig);
}

errno = saved_errno;
}
@ -1,44 +0,0 @@
#pragma once

#include <memory>

namespace ext
{

/** Thread-unsafe singleton. It works simply like a global variable.
  * Supports deinitialization.
  *
  * In most of the cases, you don't need this class.
  * Use "Meyers Singleton" instead: static T & instance() { static T x; return x; }
  */

template <class T>
class Singleton
{
public:
Singleton()
{
if (!instance)
instance = std::make_unique<T>();
}

T * operator->()
{
return instance.get();
}

static bool isInitialized()
{
return !!instance;
}

static void reset()
{
instance.reset();
}

private:
inline static std::unique_ptr<T> instance{};
};

}
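The deleted header's own comment recommends the Meyers singleton as the usual replacement. A minimal sketch of that pattern, where Registry is a hypothetical example type and not part of this commit:

struct Registry
{
    static Registry & instance()
    {
        static Registry x;   /// Constructed on first use; initialization is thread-safe since C++11.
        return x;
    }
private:
    Registry() = default;    /// Prevent construction outside instance().
};

Unlike ext::Singleton above, there is no reset() or isInitialized(); the object simply lives until program exit.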
@ -2,7 +2,6 @@
#include <stdint.h>
#include <time.h>
#include "atomic.h"
#include "musl_features.h"
#include "syscall.h"

#ifdef VDSO_CGT_SYM

@ -54,7 +53,7 @@ static void *volatile vdso_func = (void *)cgt_init;

#endif

int __clock_gettime(clockid_t clk, struct timespec *ts)
int clock_gettime(clockid_t clk, struct timespec *ts)
{
int r;

@ -104,5 +103,3 @@ int __clock_gettime(clockid_t clk, struct timespec *ts)
return __syscall_ret(r);
#endif
}

weak_alias(__clock_gettime, clock_gettime);
@ -1,10 +1,9 @@
#include <errno.h>
#include <pthread.h>
#include <time.h>
#include "musl_features.h"
#include "syscall.h"

int __clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, struct timespec * rem)
int clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, struct timespec * rem)
{
if (clk == CLOCK_THREAD_CPUTIME_ID)
return EINVAL;

@ -23,5 +22,3 @@ int __clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, str
pthread_setcanceltype(old_cancel_type, NULL);
return status;
}

weak_alias(__clock_nanosleep, clock_nanosleep);
@ -2,7 +2,4 @@

#define weak __attribute__((__weak__))
#define hidden __attribute__((__visibility__("hidden")))
#define weak_alias(old, new) \
extern __typeof(old) new __attribute__((__weak__, __alias__(#old)))

#define predict_false(x) __builtin_expect(x, 0)
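For reference, the weak_alias usage that this patch removes from clock_gettime.c expands mechanically as follows (a sketch of the preprocessor result, not new code in the commit):

/* weak_alias(__clock_gettime, clock_gettime); expands to: */
extern __typeof(__clock_gettime) clock_gettime __attribute__((__weak__, __alias__("__clock_gettime")));

That is, clock_gettime becomes a weak symbol aliasing the internal __clock_gettime definition, which is why renaming the function directly makes the alias unnecessary.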
@ -2,6 +2,7 @@
.hidden __syscall
.type __syscall,@function
__syscall:
.cfi_startproc
movq %rdi,%rax
movq %rsi,%rdi
movq %rdx,%rsi

@ -11,3 +12,4 @@ __syscall:
movq 8(%rsp),%r9
syscall
ret
.cfi_endproc

@ -39,7 +39,6 @@ typedef __attribute__((__aligned__(1))) uint32_t uint32_unaligned_t;
typedef __attribute__((__aligned__(1))) uint64_t uint64_unaligned_t;

//---------------------------------------------------------------------
// fast copy for different sizes
//---------------------------------------------------------------------

@ -694,4 +693,3 @@ static INLINE void* memcpy_fast(void *destination, const void *source, size_t si
#endif
@ -8,6 +8,7 @@ add_library (mysqlxx
src/Row.cpp
src/Value.cpp
src/Pool.cpp
src/PoolFactory.cpp
src/PoolWithFailover.cpp

include/mysqlxx/Connection.h

@ -15,6 +16,7 @@ add_library (mysqlxx
include/mysqlxx/mysqlxx.h
include/mysqlxx/Null.h
include/mysqlxx/Pool.h
include/mysqlxx/PoolFactory.h
include/mysqlxx/PoolWithFailover.h
include/mysqlxx/Query.h
include/mysqlxx/ResultBase.h

@ -198,6 +198,8 @@ public:
return description;
}

void removeConnection(Connection * data);

protected:
/// Number of MySQL connections which are created at launch.
unsigned default_connections;
55
base/mysqlxx/include/mysqlxx/PoolFactory.h
Normal file
@ -0,0 +1,55 @@
#pragma once

#include <mutex>
#include <memory>
#include <boost/noncopyable.hpp>

#include <mysqlxx/PoolWithFailover.h>

#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS 1
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS 16
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES 3

namespace mysqlxx
{

/*
 * PoolFactory.h
 * This class is a helper singleton to mutualize connections to MySQL.
 */
class PoolFactory final : private boost::noncopyable
{
public:
static PoolFactory & instance();

PoolFactory(const PoolFactory &) = delete;

/** Allocates a PoolWithFailover to connect to MySQL. */
PoolWithFailover Get(const std::string & config_name,
    unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
    unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
    size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);

/** Allocates a PoolWithFailover to connect to MySQL. */
PoolWithFailover Get(const Poco::Util::AbstractConfiguration & config,
    const std::string & config_name,
    unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
    unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
    size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);

void reset();

~PoolFactory() = default;
PoolFactory& operator=(const PoolFactory &) = delete;

private:
PoolFactory();

struct Impl;
std::unique_ptr<Impl> impl;
};

}
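A brief usage sketch for the factory declared above; the calling function and the configuration section name "mysql_metrics" are hypothetical and not part of this commit:

#include <mysqlxx/PoolFactory.h>

void exampleQueryPool()   /// hypothetical caller
{
    /// Builds (or reuses) a PoolWithFailover from the "mysql_metrics" section of the
    /// application configuration; sections with share_connection = true are cached
    /// by the factory and shared between callers.
    mysqlxx::PoolWithFailover pool = mysqlxx::PoolFactory::instance().Get("mysql_metrics");
    mysqlxx::PoolWithFailover::Entry connection = pool.Get();   /// Allocates a connection to use.
}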
@ -77,6 +77,10 @@ namespace mysqlxx
size_t max_tries;
/// Mutex for set of replicas.
std::mutex mutex;
std::string config_name;

/// Can the Pool be shared
bool shareable;

public:
using Entry = Pool::Entry;

@ -100,8 +104,6 @@ namespace mysqlxx

PoolWithFailover(const PoolWithFailover & other);

PoolWithFailover & operator=(const PoolWithFailover &) = delete;

/** Allocates a connection to use. */
Entry Get();
};
@ -23,26 +23,26 @@ namespace mysqlxx
class ResultBase;

/** Represents a single value read from MySQL.
 * The object itself does not store the data; it is just a wrapper over a pair (const char *, size_t).
 * If the UseQueryResult/StoreQueryResult or the Connection is destroyed,
 * or the next Row is read while using UseQueryResult, the object becomes invalid.
 * Allows converting (parsing) the value into various data types:
 * - with functions like getUInt(), getString(), ... (recommended);
 * - with the template function get<Type>(), which is specialized for many types (for template code);
 * - the template function get<Type> also works for all types that have a constructor from Value
 *   (this is done for extensibility);
 * - with operator Type() - but this method is implemented only for compatibility and is not recommended
 *   for use, since it is inconvenient (ambiguities often arise).
/** Represents a single value read from MySQL.
 * It doesn't own the value. It's just a wrapper over a pair (const char *, size_t).
 * If the UseQueryResult/StoreQueryResult or Connection is destroyed,
 * or you have read the next Row while using UseQueryResult, then the object is invalidated.
 * Allows transforming (parsing) the value into various data types:
 * - with getUInt(), getString(), ... (recommended);
 * - with the template function get<Type>() that is specialized for multiple data types;
 * - the template function get<Type> also works for all types that can be constructed from Value
 *   (it is an extension point);
 * - with operator Type() - this is done for compatibility and is not recommended because ambiguities are possible.
 *
 * On a parsing error, an exception is thrown.
 * When trying to extract a value that is nullptr, an exception is thrown
 * - use the isNull() method to check.
 * On parsing error, exception is thrown.
 * When trying to extract a value that is nullptr, an exception is thrown
 * - use the isNull() method to check.
 *
 * On all common systems, time_t is just a typedef of Int64 or Int32.
 * So that one can write row[0].get<time_t>(), expecting that a value like '2011-01-01 00:00:00'
 * is parsed correctly according to the current time zone, the getUInt method and the corresponding get<>() methods
 * can also parse dates and date-times.
 * As time_t is just an alias for an integer data type,
 * to allow writing row[0].get<time_t>(), and expect that values like '2011-01-01 00:00:00'
 * will be successfully parsed according to the current time zone,
 * the getUInt method and the corresponding get<>() methods
 * are capable of parsing Date and DateTime.
 */
class Value
{

@ -166,7 +166,7 @@ private:
else
throwException("Cannot parse DateTime");

return 0;    /// so that there is no warning.
return 0;    /// avoid warning.
}

@ -184,7 +184,7 @@ private:
else
throwException("Cannot parse Date");

return 0;    /// so that there is no warning.
return 0;    /// avoid warning.
}

@ -231,7 +231,7 @@ private:
double readFloatText(const char * buf, size_t length) const;

/// Throw an exception with detailed information
void throwException(const char * text) const;
[[noreturn]] void throwException(const char * text) const;
};
@ -22,15 +22,20 @@ void Pool::Entry::incrementRefCount()
|
||||
if (!data)
|
||||
return;
|
||||
++data->ref_count;
|
||||
mysql_thread_init();
|
||||
if (data->ref_count == 1)
|
||||
mysql_thread_init();
|
||||
}
|
||||
|
||||
void Pool::Entry::decrementRefCount()
|
||||
{
|
||||
if (!data)
|
||||
return;
|
||||
--data->ref_count;
|
||||
mysql_thread_end();
|
||||
if (data->ref_count > 0)
|
||||
{
|
||||
--data->ref_count;
|
||||
if (data->ref_count == 0)
|
||||
mysql_thread_end();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -169,14 +174,24 @@ Pool::Entry Pool::tryGet()
|
||||
return Entry();
|
||||
}
|
||||
|
||||
void Pool::removeConnection(Connection* connection)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
if (connection)
|
||||
{
|
||||
if (connection->ref_count > 0)
|
||||
{
|
||||
connection->conn.disconnect();
|
||||
connection->ref_count = 0;
|
||||
}
|
||||
connections.remove(connection);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Pool::Entry::disconnect()
|
||||
{
|
||||
if (data)
|
||||
{
|
||||
decrementRefCount();
|
||||
data->conn.disconnect();
|
||||
}
|
||||
pool->removeConnection(data);
|
||||
}
|
||||
|
||||
|
||||
|
122
base/mysqlxx/src/PoolFactory.cpp
Normal file
@ -0,0 +1,122 @@
|
||||
#include <mysqlxx/PoolFactory.h>
|
||||
#include <Poco/Util/Application.h>
|
||||
#include <Poco/Util/LayeredConfiguration.h>
|
||||
|
||||
namespace mysqlxx
|
||||
{
|
||||
|
||||
struct PoolFactory::Impl
|
||||
{
|
||||
// Cache of already created pools, identified by their config name
|
||||
std::map<std::string, std::shared_ptr<PoolWithFailover>> pools;
|
||||
|
||||
// Cache of pool IDs (host + port + user + ...) referencing an already established shareable pool
|
||||
std::map<std::string, std::string> pools_by_ids;
|
||||
|
||||
/// Protect pools and pools_by_ids caches
|
||||
std::mutex mutex;
|
||||
};
|
||||
|
||||
PoolWithFailover PoolFactory::Get(const std::string & config_name, unsigned default_connections,
|
||||
unsigned max_connections, size_t max_tries)
|
||||
{
|
||||
return Get(Poco::Util::Application::instance().config(), config_name, default_connections, max_connections, max_tries);
|
||||
}
|
||||
|
||||
/// Duplicate of code from StringUtils.h. Copied here for less dependencies.
|
||||
static bool startsWith(const std::string & s, const char * prefix)
|
||||
{
|
||||
return s.size() >= strlen(prefix) && 0 == memcmp(s.data(), prefix, strlen(prefix));
|
||||
}
|
||||
|
||||
static std::string getPoolEntryName(const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_name)
|
||||
{
|
||||
bool shared = config.getBool(config_name + ".share_connection", false);
|
||||
|
||||
// Not shared: no need to generate a name, the pool won't be stored
|
||||
if (!shared)
|
||||
return "";
|
||||
|
||||
std::string entry_name = "";
|
||||
std::string host = config.getString(config_name + ".host", "");
|
||||
std::string port = config.getString(config_name + ".port", "");
|
||||
std::string user = config.getString(config_name + ".user", "");
|
||||
std::string db = config.getString(config_name + ".db", "");
|
||||
std::string table = config.getString(config_name + ".table", "");
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys keys;
|
||||
config.keys(config_name, keys);
|
||||
|
||||
if (config.has(config_name + ".replica"))
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys replica_keys;
|
||||
config.keys(config_name, replica_keys);
|
||||
for (const auto & replica_config_key : replica_keys)
|
||||
{
|
||||
/// There could be other elements at the same level of the configuration file, like "user", "port"...
|
||||
if (startsWith(replica_config_key, "replica"))
|
||||
{
|
||||
std::string replica_name = config_name + "." + replica_config_key;
|
||||
std::string tmp_host = config.getString(replica_name + ".host", host);
|
||||
std::string tmp_port = config.getString(replica_name + ".port", port);
|
||||
std::string tmp_user = config.getString(replica_name + ".user", user);
|
||||
entry_name += (entry_name.empty() ? "" : "|") + tmp_user + "@" + tmp_host + ":" + tmp_port + "/" + db;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
entry_name = user + "@" + host + ":" + port + "/" + db;
|
||||
}
|
||||
return entry_name;
|
||||
}
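To illustrate the key format produced above (all values hypothetical, not taken from this commit):

/// For a non-replicated section with user "reader", host "db1", port "3306" and db "stats",
/// the generated key is:
///     reader@db1:3306/stats
/// For a section with <replica> entries, one such fragment is built per replica and the
/// fragments are joined with '|'.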
|
||||
|
||||
PoolWithFailover PoolFactory::Get(const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_name, unsigned default_connections, unsigned max_connections, size_t max_tries)
|
||||
{
|
||||
|
||||
std::lock_guard<std::mutex> lock(impl->mutex);
|
||||
if (auto entry = impl->pools.find(config_name); entry != impl->pools.end())
|
||||
{
|
||||
return *(entry->second.get());
|
||||
}
|
||||
else
|
||||
{
|
||||
std::string entry_name = getPoolEntryName(config, config_name);
|
||||
if (auto id = impl->pools_by_ids.find(entry_name); id != impl->pools_by_ids.end())
|
||||
{
|
||||
entry = impl->pools.find(id->second);
|
||||
std::shared_ptr<PoolWithFailover> pool = entry->second;
|
||||
impl->pools.insert_or_assign(config_name, pool);
|
||||
return *pool;
|
||||
}
|
||||
|
||||
auto pool = std::make_shared<PoolWithFailover>(config, config_name, default_connections, max_connections, max_tries);
|
||||
// Check whether the pool will be shared
|
||||
if (!entry_name.empty())
|
||||
{
|
||||
// Store shared pool
|
||||
impl->pools.insert_or_assign(config_name, pool);
|
||||
impl->pools_by_ids.insert_or_assign(entry_name, config_name);
|
||||
}
|
||||
return *(pool.get());
|
||||
}
|
||||
}
|
||||
|
||||
void PoolFactory::reset()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(impl->mutex);
|
||||
impl->pools.clear();
|
||||
impl->pools_by_ids.clear();
|
||||
}
|
||||
|
||||
PoolFactory::PoolFactory() : impl(std::make_unique<PoolFactory::Impl>()) {}
|
||||
|
||||
PoolFactory & PoolFactory::instance()
|
||||
{
|
||||
static PoolFactory ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
}
|
@ -15,6 +15,7 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & cfg
|
||||
const unsigned max_connections, const size_t max_tries)
|
||||
: max_tries(max_tries)
|
||||
{
|
||||
shareable = cfg.getBool(config_name + ".share_connection", false);
|
||||
if (cfg.has(config_name + ".replica"))
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys replica_keys;
|
||||
@ -48,15 +49,22 @@ PoolWithFailover::PoolWithFailover(const std::string & config_name, const unsign
|
||||
{}
|
||||
|
||||
PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
|
||||
: max_tries{other.max_tries}
|
||||
: max_tries{other.max_tries}, config_name{other.config_name}, shareable{other.shareable}
|
||||
{
|
||||
for (const auto & priority_replicas : other.replicas_by_priority)
|
||||
if (shareable)
|
||||
{
|
||||
Replicas replicas;
|
||||
replicas.reserve(priority_replicas.second.size());
|
||||
for (const auto & pool : priority_replicas.second)
|
||||
replicas.emplace_back(std::make_shared<Pool>(*pool));
|
||||
replicas_by_priority.emplace(priority_replicas.first, std::move(replicas));
|
||||
replicas_by_priority = other.replicas_by_priority;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (const auto & priority_replicas : other.replicas_by_priority)
|
||||
{
|
||||
Replicas replicas;
|
||||
replicas.reserve(priority_replicas.second.size());
|
||||
for (const auto & pool : priority_replicas.second)
|
||||
replicas.emplace_back(std::make_shared<Pool>(*pool));
|
||||
replicas_by_priority.emplace(priority_replicas.first, std::move(replicas));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,7 +89,7 @@ PoolWithFailover::Entry PoolWithFailover::Get()
|
||||
|
||||
try
|
||||
{
|
||||
Entry entry = pool->tryGet();
|
||||
Entry entry = shareable ? pool->Get() : pool->tryGet();
|
||||
|
||||
if (!entry.isNull())
|
||||
{
|
||||
|
2
contrib/base64
vendored
@ -1 +1 @@
|
||||
Subproject commit 5257626d2be17a3eb23f79be17fe55ebba394ad2
|
||||
Subproject commit 95ba56a9b041f9933f5cd2bbb2ee4e083468c20a
|
@ -52,7 +52,7 @@ set(SRCS
|
||||
)
|
||||
add_library(libxml2 ${SRCS})
|
||||
|
||||
target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} ${CMAKE_DL_LIBS})
|
||||
target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES})
|
||||
if(M_LIBRARY)
|
||||
target_link_libraries(libxml2 PRIVATE ${M_LIBRARY})
|
||||
endif()
|
||||
|
2
contrib/llvm
vendored
@ -1 +1 @@
|
||||
Subproject commit 778c297395b4a2dfd60e13969a0f9488bf2c16cf
|
||||
Subproject commit 5dab18f4861677548b8f7f6815f49384480ecead
|
2
contrib/mariadb-connector-c
vendored
@ -1 +1 @@
|
||||
Subproject commit 18016300b00825a3fcbc6fb2aa37ac3e51416f71
|
||||
Subproject commit 3f512fedf0ba0f769a1b4852b4bac542d92c5b20
|
2
contrib/poco
vendored
@ -1 +1 @@
|
||||
Subproject commit d805cf5ca4cf8bdc642261cfcbe7a0a241cb7298
|
||||
Subproject commit 860574c93980d887a89df141edd9ca2fb0024fa3
|
@ -32,7 +32,6 @@ target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl/libltdl)
|
||||
target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc)
|
||||
|
||||
target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2)
|
||||
target_link_libraries(ltdl PRIVATE ${CMAKE_DL_LIBS})
|
||||
|
||||
|
||||
set(SRCS
|
||||
|
@ -105,6 +105,7 @@ namespace ErrorCodes
|
||||
extern const int UNEXPECTED_PACKET_FROM_SERVER;
|
||||
extern const int CLIENT_OUTPUT_FORMAT_SPECIFIED;
|
||||
extern const int INVALID_USAGE_OF_INPUT;
|
||||
extern const int DEADLOCK_AVOIDED;
|
||||
}
|
||||
|
||||
|
||||
@ -906,9 +907,34 @@ private:
|
||||
query = serializeAST(*parsed_query);
|
||||
}
|
||||
|
||||
connection->sendQuery(connection_parameters.timeouts, query, query_id, QueryProcessingStage::Complete, &context.getSettingsRef(), nullptr, true);
|
||||
sendExternalTables();
|
||||
receiveResult();
|
||||
static constexpr size_t max_retries = 10;
|
||||
for (size_t retry = 0; retry < max_retries; ++retry)
|
||||
{
|
||||
try
|
||||
{
|
||||
connection->sendQuery(
|
||||
connection_parameters.timeouts,
|
||||
query,
|
||||
query_id,
|
||||
QueryProcessingStage::Complete,
|
||||
&context.getSettingsRef(),
|
||||
nullptr,
|
||||
true);
|
||||
|
||||
sendExternalTables();
|
||||
receiveResult();
|
||||
|
||||
break;
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
/// Retry when the server said "Client should retry" and no rows have been received yet.
|
||||
if (processed_rows == 0 && e.code() == ErrorCodes::DEADLOCK_AVOIDED && retry + 1 < max_retries)
|
||||
continue;
|
||||
|
||||
throw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -9,29 +9,43 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int UNKNOWN_PACKET_FROM_SERVER;
|
||||
extern const int DEADLOCK_AVOIDED;
|
||||
}
|
||||
|
||||
void Suggest::load(const ConnectionParameters & connection_parameters, size_t suggestion_limit)
|
||||
{
|
||||
loading_thread = std::thread([connection_parameters, suggestion_limit, this]
|
||||
{
|
||||
try
|
||||
for (size_t retry = 0; retry < 10; ++retry)
|
||||
{
|
||||
Connection connection(
|
||||
connection_parameters.host,
|
||||
connection_parameters.port,
|
||||
connection_parameters.default_database,
|
||||
connection_parameters.user,
|
||||
connection_parameters.password,
|
||||
"client",
|
||||
connection_parameters.compression,
|
||||
connection_parameters.security);
|
||||
try
|
||||
{
|
||||
Connection connection(
|
||||
connection_parameters.host,
|
||||
connection_parameters.port,
|
||||
connection_parameters.default_database,
|
||||
connection_parameters.user,
|
||||
connection_parameters.password,
|
||||
"client",
|
||||
connection_parameters.compression,
|
||||
connection_parameters.security);
|
||||
|
||||
loadImpl(connection, connection_parameters.timeouts, suggestion_limit);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
|
||||
loadImpl(connection, connection_parameters.timeouts, suggestion_limit);
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
/// Retry when the server said "Client should retry".
|
||||
if (e.code() == ErrorCodes::DEADLOCK_AVOIDED)
|
||||
continue;
|
||||
|
||||
std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
/// Note that keyword suggestions are available even if we cannot load data from server.
|
||||
|
@ -131,7 +131,6 @@ struct TaskStateWithOwner
|
||||
};
|
||||
|
||||
|
||||
|
||||
struct ShardPriority
|
||||
{
|
||||
UInt8 is_remote = 1;
|
||||
|
@ -298,7 +298,7 @@ void LocalServer::processQueries()
|
||||
|
||||
try
|
||||
{
|
||||
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *context, {}, {});
|
||||
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *context, {});
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
@ -60,5 +60,4 @@ void StopConditionsSet::report(UInt64 value, StopConditionsSet::StopCondition &
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
@ -592,12 +592,14 @@ void HTTPHandler::processQuery(
|
||||
customizeContext(context);
|
||||
|
||||
executeQuery(*in, *used_output.out_maybe_delayed_and_compressed, /* allow_into_outfile = */ false, context,
|
||||
[&response] (const String & content_type, const String & format)
|
||||
[&response] (const String & current_query_id, const String & content_type, const String & format, const String & timezone)
|
||||
{
|
||||
response.setContentType(content_type);
|
||||
response.add("X-ClickHouse-Query-Id", current_query_id);
|
||||
response.add("X-ClickHouse-Format", format);
|
||||
},
|
||||
[&response] (const String & current_query_id) { response.add("X-ClickHouse-Query-Id", current_query_id); });
|
||||
response.add("X-ClickHouse-Timezone", timezone);
|
||||
}
|
||||
);
|
||||
|
||||
if (used_output.hasDelayed())
|
||||
{
|
||||
@ -705,7 +707,7 @@ void HTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
|
||||
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST && !request.getChunkedTransferEncoding() &&
|
||||
!request.hasContentLength())
|
||||
{
|
||||
throw Exception("There is neither Transfer-Encoding header nor Content-Length header", ErrorCodes::HTTP_LENGTH_REQUIRED);
|
||||
throw Exception("The Transfer-Encoding is not chunked and there is no Content-Length header for POST request", ErrorCodes::HTTP_LENGTH_REQUIRED);
|
||||
}
|
||||
|
||||
processQuery(request, params, response, used_output);
|
||||
|
@ -282,14 +282,9 @@ void MySQLHandler::comQuery(ReadBuffer & payload)
|
||||
}
|
||||
else
|
||||
{
|
||||
bool with_output = false;
|
||||
std::function<void(const String &, const String &)> set_content_type_and_format = [&with_output](const String &, const String &) -> void
|
||||
{
|
||||
with_output = true;
|
||||
};
|
||||
|
||||
String replacement_query = "select ''";
|
||||
bool should_replace = false;
|
||||
bool with_output = false;
|
||||
|
||||
// Translate query from MySQL to ClickHouse.
|
||||
// This is a temporary workaround until ClickHouse supports the syntax "@@var_name".
|
||||
@ -307,7 +302,13 @@ void MySQLHandler::comQuery(ReadBuffer & payload)
|
||||
ReadBufferFromString replacement(replacement_query);
|
||||
|
||||
Context query_context = connection_context;
|
||||
executeQuery(should_replace ? replacement : payload, *out, true, query_context, set_content_type_and_format, {});
|
||||
|
||||
executeQuery(should_replace ? replacement : payload, *out, true, query_context,
|
||||
[&with_output](const String &, const String &, const String &, const String &)
|
||||
{
|
||||
with_output = true;
|
||||
}
|
||||
);
|
||||
|
||||
if (!with_output)
|
||||
packet_sender->sendPacket(OK_Packet(0x00, client_capability_flags, 0, 0, 0), true);
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <Interpreters/DNSCacheUpdater.h>
|
||||
#include <Interpreters/SystemLog.cpp>
|
||||
#include <Interpreters/ExternalLoaderXMLConfigRepository.h>
|
||||
#include <Access/AccessControlManager.h>
|
||||
#include <Storages/StorageReplicatedMergeTree.h>
|
||||
#include <Storages/System/attachSystemTables.h>
|
||||
#include <AggregateFunctions/registerAggregateFunctions.h>
|
||||
@ -59,6 +60,7 @@
|
||||
#include "TCPHandlerFactory.h"
|
||||
#include "Common/config_version.h"
|
||||
#include <Common/SensitiveDataMasker.h>
|
||||
#include <Common/ThreadFuzzer.h>
|
||||
#include "MySQLHandlerFactory.h"
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
@ -218,6 +220,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::get());
|
||||
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
|
||||
|
||||
if (ThreadFuzzer::instance().isEffective())
|
||||
LOG_WARNING(log, "ThreadFuzzer is enabled. Application will run slowly and unstable.");
|
||||
|
||||
/** Context contains all that query execution is dependent:
|
||||
* settings, available functions, data types, aggregate functions, databases...
|
||||
*/
|
||||
@ -465,6 +470,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
if (config->has("max_partition_size_to_drop"))
|
||||
global_context->setMaxPartitionSizeToDrop(config->getUInt64("max_partition_size_to_drop"));
|
||||
|
||||
global_context->updateStorageConfiguration(*config);
|
||||
},
|
||||
/* already_loaded = */ true);
|
||||
|
||||
@ -493,6 +500,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
users_config_reloader->reload();
|
||||
});
|
||||
|
||||
/// Sets a local directory storing information about access control.
|
||||
std::string access_control_local_path = config().getString("access_control_path", "");
|
||||
if (!access_control_local_path.empty())
|
||||
global_context->getAccessControlManager().setLocalDirectory(access_control_local_path);
|
||||
|
||||
/// Limit on total number of concurrently executed queries.
|
||||
global_context->getProcessList().setMaxSize(config().getInt("max_concurrent_queries", 0));
|
||||
|
||||
|
@ -503,7 +503,7 @@ void TCPHandler::processOrdinaryQuery()
|
||||
|
||||
if (after_send_progress.elapsed() / 1000 >= query_context->getSettingsRef().interactive_delay)
|
||||
{
|
||||
/// Some time passed and there is a progress.
|
||||
/// Some time passed.
|
||||
after_send_progress.restart();
|
||||
sendProgress();
|
||||
}
|
||||
@ -539,6 +539,8 @@ void TCPHandler::processOrdinaryQuery()
|
||||
}
|
||||
|
||||
state.io.onFinish();
|
||||
|
||||
sendProgress();
|
||||
}
|
||||
|
||||
|
||||
@ -546,8 +548,8 @@ void TCPHandler::processOrdinaryQueryWithProcessors(size_t num_threads)
|
||||
{
|
||||
auto & pipeline = state.io.pipeline;
|
||||
|
||||
if (pipeline.getMaxThreads())
|
||||
num_threads = std::min(num_threads, pipeline.getMaxThreads());
|
||||
/// Reduce the number of threads to recommended value.
|
||||
num_threads = std::min(num_threads, pipeline.getNumThreads());
|
||||
|
||||
/// Send header-block, to allow client to prepare output format for data to send.
|
||||
{
|
||||
@ -658,6 +660,8 @@ void TCPHandler::processOrdinaryQueryWithProcessors(size_t num_threads)
|
||||
}
|
||||
|
||||
state.io.onFinish();
|
||||
|
||||
sendProgress();
|
||||
}
|
||||
|
||||
|
||||
@ -875,48 +879,55 @@ void TCPHandler::receiveQuery()
|
||||
query_context->setCurrentQueryId(state.query_id);
|
||||
|
||||
/// Client info
|
||||
ClientInfo & client_info = query_context->getClientInfo();
|
||||
if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO)
|
||||
client_info.read(*in, client_revision);
|
||||
|
||||
/// For better support of old clients that do not send ClientInfo.
|
||||
if (client_info.query_kind == ClientInfo::QueryKind::NO_QUERY)
|
||||
{
|
||||
ClientInfo & client_info = query_context->getClientInfo();
|
||||
if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO)
|
||||
client_info.read(*in, client_revision);
|
||||
|
||||
/// For better support of old clients that do not send ClientInfo.
|
||||
if (client_info.query_kind == ClientInfo::QueryKind::NO_QUERY)
|
||||
{
|
||||
client_info.query_kind = ClientInfo::QueryKind::INITIAL_QUERY;
|
||||
client_info.client_name = client_name;
|
||||
client_info.client_version_major = client_version_major;
|
||||
client_info.client_version_minor = client_version_minor;
|
||||
client_info.client_version_patch = client_version_patch;
|
||||
client_info.client_revision = client_revision;
|
||||
}
|
||||
|
||||
/// Set fields, that are known apriori.
|
||||
client_info.interface = ClientInfo::Interface::TCP;
|
||||
|
||||
if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
{
|
||||
/// 'Current' fields was set at receiveHello.
|
||||
client_info.initial_user = client_info.current_user;
|
||||
client_info.initial_query_id = client_info.current_query_id;
|
||||
client_info.initial_address = client_info.current_address;
|
||||
}
|
||||
else
|
||||
{
|
||||
query_context->setInitialRowPolicy();
|
||||
}
|
||||
client_info.query_kind = ClientInfo::QueryKind::INITIAL_QUERY;
|
||||
client_info.client_name = client_name;
|
||||
client_info.client_version_major = client_version_major;
|
||||
client_info.client_version_minor = client_version_minor;
|
||||
client_info.client_version_patch = client_version_patch;
|
||||
client_info.client_revision = client_revision;
|
||||
}
|
||||
|
||||
/// Per query settings.
|
||||
Settings custom_settings{};
|
||||
/// Set fields, that are known apriori.
|
||||
client_info.interface = ClientInfo::Interface::TCP;
|
||||
|
||||
if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
{
|
||||
/// 'Current' fields was set at receiveHello.
|
||||
client_info.initial_user = client_info.current_user;
|
||||
client_info.initial_query_id = client_info.current_query_id;
|
||||
client_info.initial_address = client_info.current_address;
|
||||
}
|
||||
else
|
||||
{
|
||||
query_context->setInitialRowPolicy();
|
||||
}
|
||||
|
||||
/// Per query settings are also passed via TCP.
|
||||
/// We need to check them before applying because they can violate the settings constraints.
|
||||
auto settings_format = (client_revision >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? SettingsBinaryFormat::STRINGS
|
||||
: SettingsBinaryFormat::OLD;
|
||||
custom_settings.deserialize(*in, settings_format);
|
||||
auto settings_changes = custom_settings.changes();
|
||||
query_context->checkSettingsConstraints(settings_changes);
|
||||
Settings passed_settings;
|
||||
passed_settings.deserialize(*in, settings_format);
|
||||
auto settings_changes = passed_settings.changes();
|
||||
if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
{
|
||||
/// Throw an exception if the passed settings violate the constraints.
|
||||
query_context->checkSettingsConstraints(settings_changes);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Quietly clamp to the constraints if it's not an initial query.
|
||||
query_context->clampToSettingsConstraints(settings_changes);
|
||||
}
|
||||
query_context->applySettingsChanges(settings_changes);
|
||||
|
||||
Settings & settings = query_context->getSettingsRef();
|
||||
const Settings & settings = query_context->getSettingsRef();
|
||||
|
||||
/// Sync timeouts on client and server during current query to avoid dangling queries on server
|
||||
/// NOTE: We use settings.send_timeout for the receive timeout and vice versa (change arguments ordering in TimeoutSetter),
|
||||
|
3
dbms/programs/server/config.d/listen.xml.disabled
Normal file
@ -0,0 +1,3 @@
|
||||
<yandex>
|
||||
<listen_host>::</listen_host>
|
||||
</yandex>
|
9
dbms/programs/server/config.d/tls.xml.disabled
Normal file
@ -0,0 +1,9 @@
|
||||
<yandex>
|
||||
<https_port>8443</https_port>
|
||||
<tcp_port_secure>9440</tcp_port_secure>
|
||||
<openSSL>
|
||||
<server>
|
||||
<dhParamsFile remove="remove"/>
|
||||
</server>
|
||||
</openSSL>
|
||||
</yandex>
|
@ -3,25 +3,6 @@
|
||||
NOTE: User and query level settings are set up in "users.xml" file.
|
||||
-->
|
||||
<yandex>
|
||||
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
|
||||
If this section is not present in configuration, all hosts are allowed.
|
||||
-->
|
||||
<remote_url_allow_hosts>
|
||||
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
|
||||
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
|
||||
If port is explicitly specified in URL, the host:port is checked as a whole.
|
||||
If host specified here without port, any port with this host allowed.
|
||||
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
|
||||
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
|
||||
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
||||
-->
|
||||
|
||||
<!-- Regular expression can be specified. RE2 engine is used for regexps.
|
||||
Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
|
||||
(forgetting to do so is a common source of error).
|
||||
-->
|
||||
</remote_url_allow_hosts>
|
||||
|
||||
<logger>
|
||||
<!-- Possible levels: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105 -->
|
||||
<level>trace</level>
|
||||
@ -250,6 +231,24 @@
|
||||
</test_unavailable_shard>
|
||||
</remote_servers>
|
||||
|
||||
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
|
||||
If this section is not present in configuration, all hosts are allowed.
|
||||
-->
|
||||
<remote_url_allow_hosts>
|
||||
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
|
||||
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
|
||||
If port is explicitly specified in URL, the host:port is checked as a whole.
|
||||
If host specified here without port, any port with this host allowed.
|
||||
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
|
||||
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
|
||||
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
||||
-->
|
||||
|
||||
<!-- Regular expression can be specified. RE2 engine is used for regexps.
|
||||
Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
|
||||
(forgetting to do so is a common source of error).
|
||||
-->
|
||||
</remote_url_allow_hosts>
|
||||
|
||||
<!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
|
||||
By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <Access/MultipleAccessStorage.h>
|
||||
#include <Access/MemoryAccessStorage.h>
|
||||
#include <Access/UsersConfigAccessStorage.h>
|
||||
#include <Access/DiskAccessStorage.h>
|
||||
#include <Access/AccessRightsContextFactory.h>
|
||||
#include <Access/RoleContextFactory.h>
|
||||
#include <Access/RowPolicyContextFactory.h>
|
||||
@ -15,10 +16,14 @@ namespace
|
||||
std::vector<std::unique_ptr<IAccessStorage>> createStorages()
|
||||
{
|
||||
std::vector<std::unique_ptr<IAccessStorage>> list;
|
||||
list.emplace_back(std::make_unique<MemoryAccessStorage>());
|
||||
list.emplace_back(std::make_unique<DiskAccessStorage>());
|
||||
list.emplace_back(std::make_unique<UsersConfigAccessStorage>());
|
||||
list.emplace_back(std::make_unique<MemoryAccessStorage>());
|
||||
return list;
|
||||
}
|
||||
|
||||
constexpr size_t DISK_ACCESS_STORAGE_INDEX = 0;
|
||||
constexpr size_t USERS_CONFIG_ACCESS_STORAGE_INDEX = 1;
|
||||
}
|
||||
|
||||
|
||||
@ -37,10 +42,17 @@ AccessControlManager::~AccessControlManager()
|
||||
}
|
||||
|
||||
|
||||
void AccessControlManager::loadFromConfig(const Poco::Util::AbstractConfiguration & users_config)
|
||||
void AccessControlManager::setLocalDirectory(const String & directory_path)
|
||||
{
|
||||
auto & users_config_access_storage = dynamic_cast<UsersConfigAccessStorage &>(getStorageByIndex(1));
|
||||
users_config_access_storage.loadFromConfig(users_config);
|
||||
auto & disk_access_storage = dynamic_cast<DiskAccessStorage &>(getStorageByIndex(DISK_ACCESS_STORAGE_INDEX));
|
||||
disk_access_storage.setDirectory(directory_path);
|
||||
}
|
||||
|
||||
|
||||
void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config)
|
||||
{
|
||||
auto & users_config_access_storage = dynamic_cast<UsersConfigAccessStorage &>(getStorageByIndex(USERS_CONFIG_ACCESS_STORAGE_INDEX));
|
||||
users_config_access_storage.setConfiguration(users_config);
|
||||
}
|
||||
|
||||
|
||||
|
@ -45,7 +45,8 @@ public:
|
||||
AccessControlManager();
|
||||
~AccessControlManager();
|
||||
|
||||
void loadFromConfig(const Poco::Util::AbstractConfiguration & users_config);
|
||||
void setLocalDirectory(const String & directory);
|
||||
void setUsersConfig(const Poco::Util::AbstractConfiguration & users_config);
|
||||
|
||||
AccessRightsContextPtr getAccessRightsContext(
|
||||
const UUID & user_id,
|
||||
|
@ -9,6 +9,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int INVALID_GRANT;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
@ -45,6 +46,13 @@ namespace
|
||||
const AccessFlags create_table_flag = AccessType::CREATE_TABLE;
|
||||
const AccessFlags create_temporary_table_flag = AccessType::CREATE_TEMPORARY_TABLE;
|
||||
};
|
||||
|
||||
std::string_view checkCurrentDatabase(const std::string_view & current_database)
|
||||
{
|
||||
if (current_database.empty())
|
||||
throw Exception("No current database", ErrorCodes::LOGICAL_ERROR);
|
||||
return current_database;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -520,21 +528,21 @@ void AccessRights::grantImpl(const AccessRightsElement & element, std::string_vi
|
||||
else if (element.any_table)
|
||||
{
|
||||
if (element.database.empty())
|
||||
grantImpl(element.access_flags, current_database);
|
||||
grantImpl(element.access_flags, checkCurrentDatabase(current_database));
|
||||
else
|
||||
grantImpl(element.access_flags, element.database);
|
||||
}
|
||||
else if (element.any_column)
|
||||
{
|
||||
if (element.database.empty())
|
||||
grantImpl(element.access_flags, current_database, element.table);
|
||||
grantImpl(element.access_flags, checkCurrentDatabase(current_database), element.table);
|
||||
else
|
||||
grantImpl(element.access_flags, element.database, element.table);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (element.database.empty())
|
||||
grantImpl(element.access_flags, current_database, element.table, element.columns);
|
||||
grantImpl(element.access_flags, checkCurrentDatabase(current_database), element.table, element.columns);
|
||||
else
|
||||
grantImpl(element.access_flags, element.database, element.table, element.columns);
|
||||
}
|
||||
@ -575,21 +583,21 @@ void AccessRights::revokeImpl(const AccessRightsElement & element, std::string_v
|
||||
else if (element.any_table)
|
||||
{
|
||||
if (element.database.empty())
|
||||
revokeImpl<mode>(element.access_flags, current_database);
|
||||
revokeImpl<mode>(element.access_flags, checkCurrentDatabase(current_database));
|
||||
else
|
||||
revokeImpl<mode>(element.access_flags, element.database);
|
||||
}
|
||||
else if (element.any_column)
|
||||
{
|
||||
if (element.database.empty())
|
||||
revokeImpl<mode>(element.access_flags, current_database, element.table);
|
||||
revokeImpl<mode>(element.access_flags, checkCurrentDatabase(current_database), element.table);
|
||||
else
|
||||
revokeImpl<mode>(element.access_flags, element.database, element.table);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (element.database.empty())
|
||||
revokeImpl<mode>(element.access_flags, current_database, element.table, element.columns);
|
||||
revokeImpl<mode>(element.access_flags, checkCurrentDatabase(current_database), element.table, element.columns);
|
||||
else
|
||||
revokeImpl<mode>(element.access_flags, element.database, element.table, element.columns);
|
||||
}
|
||||
@ -710,21 +718,21 @@ bool AccessRights::isGrantedImpl(const AccessRightsElement & element, std::strin
|
||||
else if (element.any_table)
|
||||
{
|
||||
if (element.database.empty())
|
||||
return isGrantedImpl(element.access_flags, current_database);
|
||||
return isGrantedImpl(element.access_flags, checkCurrentDatabase(current_database));
|
||||
else
|
||||
return isGrantedImpl(element.access_flags, element.database);
|
||||
}
|
||||
else if (element.any_column)
|
||||
{
|
||||
if (element.database.empty())
|
||||
return isGrantedImpl(element.access_flags, current_database, element.table);
|
||||
return isGrantedImpl(element.access_flags, checkCurrentDatabase(current_database), element.table);
|
||||
else
|
||||
return isGrantedImpl(element.access_flags, element.database, element.table);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (element.database.empty())
|
||||
return isGrantedImpl(element.access_flags, current_database, element.table, element.columns);
|
||||
return isGrantedImpl(element.access_flags, checkCurrentDatabase(current_database), element.table, element.columns);
|
||||
else
|
||||
return isGrantedImpl(element.access_flags, element.database, element.table, element.columns);
|
||||
}
|
||||
|
@ -186,20 +186,20 @@ void AccessRightsContext::setRolesInfo(const CurrentRolesInfoPtr & roles_info_)
|
||||
}
|
||||
|
||||
|
||||
void AccessRightsContext::checkPassword(const String & password) const
|
||||
bool AccessRightsContext::isCorrectPassword(const String & password) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (!user)
|
||||
throw Exception(user_name + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
|
||||
user->authentication.checkPassword(password, user_name);
|
||||
return false;
|
||||
return user->authentication.isCorrectPassword(password);
|
||||
}
|
||||
|
||||
void AccessRightsContext::checkHostIsAllowed() const
|
||||
bool AccessRightsContext::isClientHostAllowed() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (!user)
|
||||
throw Exception(user_name + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
|
||||
user->allowed_client_hosts.checkContains(params.address, user_name);
|
||||
return false;
|
||||
return user->allowed_client_hosts.contains(params.address);
|
||||
}
|
||||
|
||||
|
||||
|
@ -60,8 +60,8 @@ public:
|
||||
UserPtr getUser() const;
|
||||
String getUserName() const;
|
||||
|
||||
void checkPassword(const String & password) const;
|
||||
void checkHostIsAllowed() const;
|
||||
bool isCorrectPassword(const String & password) const;
|
||||
bool isClientHostAllowed() const;
|
||||
|
||||
CurrentRolesInfoPtr getRolesInfo() const;
|
||||
std::vector<UUID> getCurrentRoles() const;
|
||||
|
@ -15,7 +15,6 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int DNS_ERROR;
|
||||
extern const int IP_ADDRESS_NOT_ALLOWED;
|
||||
}
|
||||
|
||||
namespace
|
||||
@ -367,16 +366,4 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void AllowedClientHosts::checkContains(const IPAddress & address, const String & user_name) const
|
||||
{
|
||||
if (!contains(address))
|
||||
{
|
||||
if (user_name.empty())
|
||||
throw Exception("It's not allowed to connect from address " + address.toString(), ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
|
||||
else
|
||||
throw Exception("User " + user_name + " is not allowed to connect from address " + address.toString(), ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -111,10 +111,6 @@ public:
|
||||
/// Checks if the provided address is in the list. Returns false if not.
|
||||
bool contains(const IPAddress & address) const;
|
||||
|
||||
/// Checks if the provided address is in the list. Throws an exception if not.
|
||||
/// `username` is only used for generating an error message if the address isn't in the list.
|
||||
void checkContains(const IPAddress & address, const String & user_name = String()) const;
|
||||
|
||||
friend bool operator ==(const AllowedClientHosts & lhs, const AllowedClientHosts & rhs);
|
||||
friend bool operator !=(const AllowedClientHosts & lhs, const AllowedClientHosts & rhs) { return !(lhs == rhs); }
|
||||
|
||||
|
@ -9,8 +9,6 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int REQUIRED_PASSWORD;
|
||||
extern const int WRONG_PASSWORD;
|
||||
}
|
||||
|
||||
|
||||
@ -77,15 +75,4 @@ bool Authentication::isCorrectPassword(const String & password_) const
|
||||
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
|
||||
void Authentication::checkPassword(const String & password_, const String & user_name) const
|
||||
{
|
||||
if (isCorrectPassword(password_))
|
||||
return;
|
||||
auto info_about_user_name = [&user_name]() { return user_name.empty() ? String() : " for user " + user_name; };
|
||||
if (password_.empty() && (type != NO_PASSWORD))
|
||||
throw Exception("Password required" + info_about_user_name(), ErrorCodes::REQUIRED_PASSWORD);
|
||||
throw Exception("Wrong password" + info_about_user_name(), ErrorCodes::WRONG_PASSWORD);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -70,10 +70,6 @@ public:
|
||||
/// Checks if the provided password is correct. Returns false if not.
|
||||
bool isCorrectPassword(const String & password) const;
|
||||
|
||||
/// Checks if the provided password is correct. Throws an exception if not.
|
||||
/// `user_name` is only used for generating an error message if the password is incorrect.
|
||||
void checkPassword(const String & password, const String & user_name = String()) const;
|
||||
|
||||
friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); }
|
||||
friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); }
|
||||
|
||||
|
775
dbms/src/Access/DiskAccessStorage.cpp
Normal file
@ -0,0 +1,775 @@
|
||||
#include <Access/DiskAccessStorage.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <Access/User.h>
|
||||
#include <Access/Role.h>
|
||||
#include <Access/RowPolicy.h>
|
||||
#include <Access/Quota.h>
|
||||
#include <Parsers/ASTCreateUserQuery.h>
|
||||
#include <Parsers/ASTCreateRoleQuery.h>
|
||||
#include <Parsers/ASTCreateRowPolicyQuery.h>
|
||||
#include <Parsers/ASTCreateQuotaQuery.h>
|
||||
#include <Parsers/ASTGrantQuery.h>
|
||||
#include <Parsers/ParserCreateUserQuery.h>
|
||||
#include <Parsers/ParserCreateRoleQuery.h>
|
||||
#include <Parsers/ParserCreateRowPolicyQuery.h>
|
||||
#include <Parsers/ParserCreateQuotaQuery.h>
|
||||
#include <Parsers/ParserGrantQuery.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Interpreters/InterpreterCreateUserQuery.h>
|
||||
#include <Interpreters/InterpreterCreateRoleQuery.h>
|
||||
#include <Interpreters/InterpreterCreateRowPolicyQuery.h>
|
||||
#include <Interpreters/InterpreterCreateQuotaQuery.h>
|
||||
#include <Interpreters/InterpreterGrantQuery.h>
|
||||
#include <Interpreters/InterpreterShowCreateAccessEntityQuery.h>
|
||||
#include <Interpreters/InterpreterShowGrantsQuery.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/algorithm/copy.hpp>
|
||||
#include <boost/range/algorithm_ext/push_back.hpp>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int DIRECTORY_DOESNT_EXIST;
|
||||
extern const int FILE_DOESNT_EXIST;
|
||||
extern const int INCORRECT_ACCESS_ENTITY_DEFINITION;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
/// Special parser for the 'ATTACH access entity' queries.
|
||||
class ParserAttachAccessEntity : public IParserBase
|
||||
{
|
||||
protected:
|
||||
const char * getName() const override { return "ATTACH access entity query"; }
|
||||
|
||||
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
|
||||
{
|
||||
if (ParserCreateUserQuery{}.enableAttachMode(true).parse(pos, node, expected))
|
||||
return true;
|
||||
if (ParserCreateRoleQuery{}.enableAttachMode(true).parse(pos, node, expected))
|
||||
return true;
|
||||
if (ParserCreateRowPolicyQuery{}.enableAttachMode(true).parse(pos, node, expected))
|
||||
return true;
|
||||
if (ParserCreateQuotaQuery{}.enableAttachMode(true).parse(pos, node, expected))
|
||||
return true;
|
||||
if (ParserGrantQuery{}.enableAttachMode(true).parse(pos, node, expected))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/// Reads a file containing ATTACH queries and then parses it to build an access entity.
|
||||
AccessEntityPtr readAccessEntityFile(const std::filesystem::path & file_path)
|
||||
{
|
||||
/// Read the file.
|
||||
ReadBufferFromFile in{file_path};
|
||||
String file_contents;
|
||||
readStringUntilEOF(file_contents, in);
|
||||
|
||||
/// Parse the file contents.
|
||||
ASTs queries;
|
||||
ParserAttachAccessEntity parser;
|
||||
const char * begin = file_contents.data(); /// start of the current query
const char * pos = begin; /// the parser moves `pos` from `begin` to the end of the current query
|
||||
const char * end = begin + file_contents.size();
|
||||
while (pos < end)
|
||||
{
|
||||
queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0));
|
||||
while (isWhitespaceASCII(*pos) || *pos == ';')
|
||||
++pos;
|
||||
}
|
||||
|
||||
/// Interpret the AST to build an access entity.
|
||||
std::shared_ptr<User> user;
|
||||
std::shared_ptr<Role> role;
|
||||
std::shared_ptr<RowPolicy> policy;
|
||||
std::shared_ptr<Quota> quota;
|
||||
AccessEntityPtr res;
|
||||
|
||||
for (const auto & query : queries)
|
||||
{
|
||||
if (auto create_user_query = query->as<ASTCreateUserQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
res = user = std::make_unique<User>();
|
||||
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query);
|
||||
}
|
||||
else if (auto create_role_query = query->as<ASTCreateRoleQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
res = role = std::make_unique<Role>();
|
||||
InterpreterCreateRoleQuery::updateRoleFromQuery(*role, *create_role_query);
|
||||
}
|
||||
else if (auto create_policy_query = query->as<ASTCreateRowPolicyQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
res = policy = std::make_unique<RowPolicy>();
|
||||
InterpreterCreateRowPolicyQuery::updateRowPolicyFromQuery(*policy, *create_policy_query);
|
||||
}
|
||||
else if (auto create_quota_query = query->as<ASTCreateQuotaQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities are attached in the same file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
res = quota = std::make_unique<Quota>();
|
||||
InterpreterCreateQuotaQuery::updateQuotaFromQuery(*quota, *create_quota_query);
|
||||
}
|
||||
else if (auto grant_query = query->as<ASTGrantQuery>())
|
||||
{
|
||||
if (!user && !role)
|
||||
throw Exception("A user or role should be attached before grant in file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
if (user)
|
||||
InterpreterGrantQuery::updateUserFromQuery(*user, *grant_query);
|
||||
else
|
||||
InterpreterGrantQuery::updateRoleFromQuery(*role, *grant_query);
|
||||
}
|
||||
else
|
||||
throw Exception("Two access entities are attached in the same file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
}
|
||||
|
||||
if (!res)
|
||||
throw Exception("No access entities attached in file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/// Writes ATTACH queries for building a specified access entity to a file.
|
||||
void writeAccessEntityFile(const std::filesystem::path & file_path, const IAccessEntity & entity)
|
||||
{
|
||||
/// Build list of ATTACH queries.
|
||||
ASTs queries;
|
||||
queries.push_back(InterpreterShowCreateAccessEntityQuery::getAttachQuery(entity));
|
||||
if (entity.getType() == typeid(User) || entity.getType() == typeid(Role))
|
||||
boost::range::push_back(queries, InterpreterShowGrantsQuery::getAttachGrantQueries(entity));
|
||||
|
||||
/// Serialize the list of ATTACH queries to a string.
|
||||
std::stringstream ss;
|
||||
for (const ASTPtr & query : queries)
|
||||
ss << *query << ";\n";
|
||||
String file_contents = std::move(ss).str();
|
||||
|
||||
/// First we save *.tmp file and then we rename if everything's ok.
|
||||
auto tmp_file_path = std::filesystem::path{file_path}.replace_extension(".tmp");
|
||||
bool succeeded = false;
|
||||
SCOPE_EXIT(
|
||||
{
|
||||
if (!succeeded)
|
||||
std::filesystem::remove(tmp_file_path);
|
||||
});
|
||||
|
||||
/// Write the file.
|
||||
WriteBufferFromFile out{tmp_file_path.string()};
|
||||
out.write(file_contents.data(), file_contents.size());
|
||||
|
||||
/// Rename.
|
||||
std::filesystem::rename(tmp_file_path, file_path);
|
||||
succeeded = true;
|
||||
}
|
||||
|
||||
|
||||
/// Calculates the path to a file named <id>.sql for saving an access entity.
|
||||
std::filesystem::path getAccessEntityFilePath(const String & directory_path, const UUID & id)
|
||||
{
|
||||
return std::filesystem::path(directory_path).append(toString(id)).replace_extension(".sql");
|
||||
}
|
||||
|
||||
|
||||
/// Reads a mapping from access entity name to UUID, for entities of a single type, from a file.
|
||||
std::unordered_map<String, UUID> readListFile(const std::filesystem::path & file_path)
|
||||
{
|
||||
ReadBufferFromFile in(file_path);
|
||||
|
||||
size_t num;
|
||||
readVarUInt(num, in);
|
||||
std::unordered_map<String, UUID> res;
|
||||
res.reserve(num);
|
||||
|
||||
for (size_t i = 0; i != num; ++i)
|
||||
{
|
||||
String name;
|
||||
readStringBinary(name, in);
|
||||
UUID id;
|
||||
readUUIDText(id, in);
|
||||
res[name] = id;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/// Writes a mapping from access entity name to UUID, for entities of a single type, to a file.
|
||||
void writeListFile(const std::filesystem::path & file_path, const std::unordered_map<String, UUID> & map)
|
||||
{
|
||||
WriteBufferFromFile out(file_path);
|
||||
writeVarUInt(map.size(), out);
|
||||
for (const auto & [name, id] : map)
|
||||
{
|
||||
writeStringBinary(name, out);
|
||||
writeUUIDText(id, out);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Calculates the path for storing a mapping from access entity name to UUID for entities of a single type.
|
||||
std::filesystem::path getListFilePath(const String & directory_path, std::type_index type)
|
||||
{
|
||||
std::string_view file_name;
|
||||
if (type == typeid(User))
|
||||
file_name = "users";
|
||||
else if (type == typeid(Role))
|
||||
file_name = "roles";
|
||||
else if (type == typeid(Quota))
|
||||
file_name = "quotas";
|
||||
else if (type == typeid(RowPolicy))
|
||||
file_name = "row_policies";
|
||||
else
|
||||
throw Exception("Unexpected type of access entity: " + IAccessEntity::getTypeName(type),
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
return std::filesystem::path(directory_path).append(file_name).replace_extension(".list");
|
||||
}
|
||||
|
||||
|
||||
/// Calculates the path to a temporary file whose existence means that the list files are corrupted
/// and need to be rebuilt.
|
||||
std::filesystem::path getNeedRebuildListsMarkFilePath(const String & directory_path)
|
||||
{
|
||||
return std::filesystem::path(directory_path).append("need_rebuild_lists.mark");
|
||||
}
|
||||
|
||||
|
||||
static const std::vector<std::type_index> & getAllAccessEntityTypes()
|
||||
{
|
||||
static const std::vector<std::type_index> res = {typeid(User), typeid(Role), typeid(RowPolicy), typeid(Quota)};
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
bool tryParseUUID(const String & str, UUID & id)
|
||||
{
|
||||
try
|
||||
{
|
||||
id = parseFromString<UUID>(str);
|
||||
return true;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DiskAccessStorage::DiskAccessStorage()
|
||||
: IAccessStorage("disk")
|
||||
{
|
||||
for (const auto & type : getAllAccessEntityTypes())
|
||||
name_to_id_maps[type];
|
||||
}
|
||||
|
||||
|
||||
DiskAccessStorage::~DiskAccessStorage()
|
||||
{
|
||||
stopListsWritingThread();
|
||||
writeLists();
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::setDirectory(const String & directory_path_)
|
||||
{
|
||||
Notifications notifications;
|
||||
SCOPE_EXIT({ notify(notifications); });
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
initialize(directory_path_, notifications);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::initialize(const String & directory_path_, Notifications & notifications)
|
||||
{
|
||||
auto canonical_directory_path = std::filesystem::weakly_canonical(directory_path_);
|
||||
|
||||
if (initialized)
|
||||
{
|
||||
if (directory_path == canonical_directory_path)
|
||||
return;
|
||||
throw Exception("Storage " + getStorageName() + " already initialized with another directory", ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
std::filesystem::create_directories(canonical_directory_path);
|
||||
if (!std::filesystem::exists(canonical_directory_path) || !std::filesystem::is_directory(canonical_directory_path))
|
||||
throw Exception("Couldn't create directory " + canonical_directory_path.string(), ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
||||
|
||||
directory_path = canonical_directory_path;
|
||||
initialized = true;
|
||||
|
||||
bool should_rebuild_lists = std::filesystem::exists(getNeedRebuildListsMarkFilePath(directory_path));
|
||||
if (!should_rebuild_lists)
|
||||
{
|
||||
if (!readLists())
|
||||
should_rebuild_lists = true;
|
||||
}
|
||||
|
||||
if (should_rebuild_lists)
|
||||
{
|
||||
rebuildLists();
|
||||
writeLists();
|
||||
}
|
||||
|
||||
for (const auto & [id, entry] : id_to_entry_map)
|
||||
prepareNotifications(id, entry, false, notifications);
|
||||
}
|
||||
|
||||
|
||||
bool DiskAccessStorage::readLists()
|
||||
{
|
||||
assert(id_to_entry_map.empty());
|
||||
assert(name_to_id_maps.size() == getAllAccessEntityTypes().size());
|
||||
bool ok = true;
|
||||
for (auto & [type, name_to_id_map] : name_to_id_maps)
|
||||
{
|
||||
auto file_path = getListFilePath(directory_path, type);
|
||||
if (!std::filesystem::exists(file_path))
|
||||
{
|
||||
LOG_WARNING(getLogger(), "File " + file_path.string() + " doesn't exist");
|
||||
ok = false;
|
||||
break;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
name_to_id_map = readListFile(file_path);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(getLogger(), "Could not read " + file_path.string());
|
||||
ok = false;
|
||||
break;
|
||||
}
|
||||
for (const auto & [name, id] : name_to_id_map)
|
||||
id_to_entry_map.emplace(id, Entry{name, type});
|
||||
}
|
||||
|
||||
if (!ok)
|
||||
{
|
||||
id_to_entry_map.clear();
|
||||
for (auto & name_to_id_map : name_to_id_maps | boost::adaptors::map_values)
|
||||
name_to_id_map.clear();
|
||||
}
|
||||
return ok;
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::writeLists()
|
||||
{
|
||||
if (failed_to_write_lists || types_of_lists_to_write.empty())
|
||||
return; /// We don't try to write list files after the first fail.
|
||||
/// The next restart of the server will invoke rebuilding of the list files.
|
||||
|
||||
for (const auto & type : types_of_lists_to_write)
|
||||
{
|
||||
const auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
auto file_path = getListFilePath(directory_path, type);
|
||||
try
|
||||
{
|
||||
writeListFile(file_path, name_to_id_map);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(getLogger(), "Could not write " + file_path.string());
|
||||
failed_to_write_lists = true;
|
||||
types_of_lists_to_write.clear();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/// The list files were successfully written, so we don't need the 'need_rebuild_lists.mark' file any longer.
|
||||
std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
|
||||
types_of_lists_to_write.clear();
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::scheduleWriteLists(std::type_index type)
|
||||
{
|
||||
if (failed_to_write_lists)
|
||||
return;
|
||||
|
||||
bool already_scheduled = !types_of_lists_to_write.empty();
|
||||
types_of_lists_to_write.insert(type);
|
||||
|
||||
if (already_scheduled)
|
||||
return;
|
||||
|
||||
/// Create the 'need_rebuild_lists.mark' file.
|
||||
/// This file will be used later to find out if writing lists is successful or not.
|
||||
std::ofstream{getNeedRebuildListsMarkFilePath(directory_path)};
|
||||
|
||||
startListsWritingThread();
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::startListsWritingThread()
|
||||
{
|
||||
if (lists_writing_thread.joinable())
|
||||
{
|
||||
if (!lists_writing_thread_exited)
|
||||
return;
|
||||
lists_writing_thread.detach();
|
||||
}
|
||||
|
||||
lists_writing_thread_exited = false;
|
||||
lists_writing_thread = ThreadFromGlobalPool{&DiskAccessStorage::listsWritingThreadFunc, this};
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::stopListsWritingThread()
|
||||
{
|
||||
if (lists_writing_thread.joinable())
|
||||
{
|
||||
lists_writing_thread_should_exit.notify_one();
|
||||
lists_writing_thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::listsWritingThreadFunc()
|
||||
{
|
||||
std::unique_lock lock{mutex};
|
||||
SCOPE_EXIT({ lists_writing_thread_exited = true; });
|
||||
|
||||
/// It's better not to write the list files too often, which is why we wait
/// for the following timeout before writing.
|
||||
const auto timeout = std::chrono::minutes(1);
|
||||
if (lists_writing_thread_should_exit.wait_for(lock, timeout) != std::cv_status::timeout)
|
||||
return; /// The destructor requires us to exit.
|
||||
|
||||
writeLists();
|
||||
}
|
||||
|
||||
|
||||
/// Reads and parses all the "<id>.sql" files from a specified directory
|
||||
/// and then saves the files "users.list", "roles.list", etc. to the same directory.
|
||||
void DiskAccessStorage::rebuildLists()
|
||||
{
|
||||
LOG_WARNING(getLogger(), "Recovering lists in directory " + directory_path);
|
||||
assert(id_to_entry_map.empty());
|
||||
for (const auto & directory_entry : std::filesystem::directory_iterator(directory_path))
|
||||
{
|
||||
if (!directory_entry.is_regular_file())
|
||||
continue;
|
||||
const auto & path = directory_entry.path();
|
||||
if (path.extension() != ".sql")
|
||||
continue;
|
||||
|
||||
UUID id;
|
||||
if (!tryParseUUID(path.stem(), id))
|
||||
continue;
|
||||
|
||||
auto entity = readAccessEntityFile(getAccessEntityFilePath(directory_path, id));
|
||||
auto type = entity->getType();
|
||||
auto & name_to_id_map = name_to_id_maps[type];
|
||||
auto it_by_name = name_to_id_map.emplace(entity->getFullName(), id).first;
|
||||
id_to_entry_map.emplace(id, Entry{it_by_name->first, type});
|
||||
}
|
||||
|
||||
boost::range::copy(getAllAccessEntityTypes(), std::inserter(types_of_lists_to_write, types_of_lists_to_write.end()));
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> DiskAccessStorage::findImpl(std::type_index type, const String & name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
auto it = name_to_id_map.find(name);
|
||||
if (it == name_to_id_map.end())
|
||||
return {};
|
||||
return it->second;
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> DiskAccessStorage::findAllImpl(std::type_index type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
std::vector<UUID> res;
|
||||
res.reserve(name_to_id_map.size());
|
||||
boost::range::copy(name_to_id_map | boost::adaptors::map_values, std::back_inserter(res));
|
||||
return res;
|
||||
}
|
||||
|
||||
bool DiskAccessStorage::existsImpl(const UUID & id) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
return id_to_entry_map.contains(id);
|
||||
}
|
||||
|
||||
|
||||
AccessEntityPtr DiskAccessStorage::readImpl(const UUID & id) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it == id_to_entry_map.end())
|
||||
throwNotFound(id);
|
||||
|
||||
auto & entry = it->second;
|
||||
if (!entry.entity)
|
||||
entry.entity = readAccessEntityFromDisk(id);
|
||||
return entry.entity;
|
||||
}
|
||||
|
||||
|
||||
String DiskAccessStorage::readNameImpl(const UUID & id) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it == id_to_entry_map.end())
|
||||
throwNotFound(id);
|
||||
return String{it->second.name};
|
||||
}
|
||||
|
||||
|
||||
bool DiskAccessStorage::canInsertImpl(const AccessEntityPtr &) const
|
||||
{
|
||||
return initialized;
|
||||
}
|
||||
|
||||
|
||||
UUID DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists)
|
||||
{
|
||||
Notifications notifications;
|
||||
SCOPE_EXIT({ notify(notifications); });
|
||||
|
||||
UUID id = generateRandomID();
|
||||
std::lock_guard lock{mutex};
|
||||
insertNoLock(id, new_entity, replace_if_exists, notifications);
|
||||
return id;
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications)
|
||||
{
|
||||
const String & name = new_entity->getFullName();
|
||||
std::type_index type = new_entity->getType();
|
||||
if (!initialized)
|
||||
throw Exception(
|
||||
"Cannot insert " + new_entity->getTypeName() + " " + backQuote(name) + " to " + getStorageName()
|
||||
+ " because the output directory is not set",
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
/// Check that we can insert.
|
||||
auto it_by_id = id_to_entry_map.find(id);
|
||||
if (it_by_id != id_to_entry_map.end())
|
||||
{
|
||||
const auto & existing_entry = it_by_id->second;
|
||||
throwIDCollisionCannotInsert(id, type, name, existing_entry.entity->getType(), existing_entry.entity->getFullName());
|
||||
}
|
||||
|
||||
auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
auto it_by_name = name_to_id_map.find(name);
|
||||
bool name_collision = (it_by_name != name_to_id_map.end());
|
||||
|
||||
if (name_collision && !replace_if_exists)
|
||||
throwNameCollisionCannotInsert(type, name);
|
||||
|
||||
scheduleWriteLists(type);
|
||||
writeAccessEntityToDisk(id, *new_entity);
|
||||
|
||||
if (name_collision && replace_if_exists)
|
||||
removeNoLock(it_by_name->second, notifications);
|
||||
|
||||
/// Do insertion.
|
||||
it_by_name = name_to_id_map.emplace(name, id).first;
|
||||
it_by_id = id_to_entry_map.emplace(id, Entry{it_by_name->first, type}).first;
|
||||
auto & entry = it_by_id->second;
|
||||
entry.entity = new_entity;
|
||||
prepareNotifications(id, entry, false, notifications);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::removeImpl(const UUID & id)
|
||||
{
|
||||
Notifications notifications;
|
||||
SCOPE_EXIT({ notify(notifications); });
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
removeNoLock(id, notifications);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::removeNoLock(const UUID & id, Notifications & notifications)
|
||||
{
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it == id_to_entry_map.end())
|
||||
throwNotFound(id);
|
||||
|
||||
Entry & entry = it->second;
|
||||
String name{it->second.name};
|
||||
std::type_index type = it->second.type;
|
||||
|
||||
scheduleWriteLists(type);
|
||||
deleteAccessEntityOnDisk(id);
|
||||
|
||||
/// Do removing.
|
||||
prepareNotifications(id, entry, true, notifications);
|
||||
id_to_entry_map.erase(it);
|
||||
auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
name_to_id_map.erase(name);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::updateImpl(const UUID & id, const UpdateFunc & update_func)
|
||||
{
|
||||
Notifications notifications;
|
||||
SCOPE_EXIT({ notify(notifications); });
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
updateNoLock(id, update_func, notifications);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_func, Notifications & notifications)
|
||||
{
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it == id_to_entry_map.end())
|
||||
throwNotFound(id);
|
||||
|
||||
Entry & entry = it->second;
|
||||
if (!entry.entity)
|
||||
entry.entity = readAccessEntityFromDisk(id);
|
||||
auto old_entity = entry.entity;
|
||||
auto new_entity = update_func(old_entity);
|
||||
|
||||
if (*new_entity == *old_entity)
|
||||
return;
|
||||
|
||||
String new_name = new_entity->getFullName();
|
||||
auto old_name = entry.name;
|
||||
const std::type_index type = entry.type;
|
||||
bool name_changed = (new_name != old_name);
|
||||
if (name_changed)
|
||||
{
|
||||
const auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
if (name_to_id_map.contains(new_name))
|
||||
throwNameCollisionCannotRename(type, String{old_name}, new_name);
|
||||
scheduleWriteLists(type);
|
||||
}
|
||||
|
||||
writeAccessEntityToDisk(id, *new_entity);
|
||||
entry.entity = new_entity;
|
||||
|
||||
if (name_changed)
|
||||
{
|
||||
auto & name_to_id_map = name_to_id_maps.at(type);
|
||||
name_to_id_map.erase(String{old_name});
|
||||
auto it_by_name = name_to_id_map.emplace(new_name, id).first;
|
||||
entry.name = it_by_name->first;
|
||||
}
|
||||
|
||||
prepareNotifications(id, entry, false, notifications);
|
||||
}
|
||||
|
||||
|
||||
AccessEntityPtr DiskAccessStorage::readAccessEntityFromDisk(const UUID & id) const
|
||||
{
|
||||
return readAccessEntityFile(getAccessEntityFilePath(directory_path, id));
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::writeAccessEntityToDisk(const UUID & id, const IAccessEntity & entity) const
|
||||
{
|
||||
writeAccessEntityFile(getAccessEntityFilePath(directory_path, id), entity);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
|
||||
{
|
||||
auto file_path = getAccessEntityFilePath(directory_path, id);
|
||||
if (!std::filesystem::remove(file_path))
|
||||
throw Exception("Couldn't delete " + file_path.string(), ErrorCodes::FILE_DOESNT_EXIST);
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::prepareNotifications(const UUID & id, const Entry & entry, bool remove, Notifications & notifications) const
|
||||
{
|
||||
if (!remove && !entry.entity)
|
||||
return;
|
||||
|
||||
const AccessEntityPtr entity = remove ? nullptr : entry.entity;
|
||||
for (const auto & handler : entry.handlers_by_id)
|
||||
notifications.push_back({handler, id, entity});
|
||||
|
||||
auto range = handlers_by_type.equal_range(entry.type);
|
||||
for (auto it = range.first; it != range.second; ++it)
|
||||
notifications.push_back({it->second, id, entity});
|
||||
}
|
||||
|
||||
|
||||
ext::scope_guard DiskAccessStorage::subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it == id_to_entry_map.end())
|
||||
return {};
|
||||
const Entry & entry = it->second;
|
||||
auto handler_it = entry.handlers_by_id.insert(entry.handlers_by_id.end(), handler);
|
||||
|
||||
return [this, id, handler_it]
|
||||
{
|
||||
std::lock_guard lock2{mutex};
|
||||
auto it2 = id_to_entry_map.find(id);
|
||||
if (it2 != id_to_entry_map.end())
|
||||
{
|
||||
const Entry & entry2 = it2->second;
|
||||
entry2.handlers_by_id.erase(handler_it);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
ext::scope_guard DiskAccessStorage::subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto handler_it = handlers_by_type.emplace(type, handler);
|
||||
|
||||
return [this, handler_it]
|
||||
{
|
||||
std::lock_guard lock2{mutex};
|
||||
handlers_by_type.erase(handler_it);
|
||||
};
|
||||
}
|
||||
|
||||
bool DiskAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = id_to_entry_map.find(id);
|
||||
if (it != id_to_entry_map.end())
|
||||
{
|
||||
const Entry & entry = it->second;
|
||||
return !entry.handlers_by_id.empty();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool DiskAccessStorage::hasSubscriptionImpl(std::type_index type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto range = handlers_by_type.equal_range(type);
|
||||
return range.first != range.second;
|
||||
}
|
||||
|
||||
}
|
76 dbms/src/Access/DiskAccessStorage.h Normal file
@@ -0,0 +1,76 @@
|
||||
#pragma once

#include <Access/MemoryAccessStorage.h>
#include <Common/ThreadPool.h>
#include <boost/container/flat_set.hpp>


namespace DB
{
/// Loads and saves access entities on a local disk to a specified directory.
class DiskAccessStorage : public IAccessStorage
{
public:
    DiskAccessStorage();
    ~DiskAccessStorage() override;

    void setDirectory(const String & directory_path_);

private:
    std::optional<UUID> findImpl(std::type_index type, const String & name) const override;
    std::vector<UUID> findAllImpl(std::type_index type) const override;
    bool existsImpl(const UUID & id) const override;
    AccessEntityPtr readImpl(const UUID & id) const override;
    String readNameImpl(const UUID & id) const override;
    bool canInsertImpl(const AccessEntityPtr & entity) const override;
    UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override;
    void removeImpl(const UUID & id) override;
    void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
    ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
    ext::scope_guard subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const override;
    bool hasSubscriptionImpl(const UUID & id) const override;
    bool hasSubscriptionImpl(std::type_index type) const override;

    void initialize(const String & directory_path_, Notifications & notifications);
    bool readLists();
    void writeLists();
    void scheduleWriteLists(std::type_index type);
    void rebuildLists();

    void startListsWritingThread();
    void stopListsWritingThread();
    void listsWritingThreadFunc();

    void insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications);
    void removeNoLock(const UUID & id, Notifications & notifications);
    void updateNoLock(const UUID & id, const UpdateFunc & update_func, Notifications & notifications);

    AccessEntityPtr readAccessEntityFromDisk(const UUID & id) const;
    void writeAccessEntityToDisk(const UUID & id, const IAccessEntity & entity) const;
    void deleteAccessEntityOnDisk(const UUID & id) const;

    using NameToIDMap = std::unordered_map<String, UUID>;
    struct Entry
    {
        Entry(const std::string_view & name_, std::type_index type_) : name(name_), type(type_) {}
        std::string_view name; /// view points to a string in `name_to_id_maps`.
        std::type_index type;
        mutable AccessEntityPtr entity; /// may be nullptr, if the entity hasn't been loaded yet.
        mutable std::list<OnChangedHandler> handlers_by_id;
    };

    void prepareNotifications(const UUID & id, const Entry & entry, bool remove, Notifications & notifications) const;

    String directory_path;
    bool initialized = false;
    std::unordered_map<std::type_index, NameToIDMap> name_to_id_maps;
    std::unordered_map<UUID, Entry> id_to_entry_map;
    boost::container::flat_set<std::type_index> types_of_lists_to_write;
    bool failed_to_write_lists = false; /// Whether writing of the list files has failed since the last restart of the server.
    ThreadFromGlobalPool lists_writing_thread; /// List files are written in a separate thread.
    std::condition_variable lists_writing_thread_should_exit; /// Signals `lists_writing_thread` to exit.
    std::atomic<bool> lists_writing_thread_exited = false;
    mutable std::unordered_multimap<std::type_index, OnChangedHandler> handlers_by_type;
    mutable std::mutex mutex;
};
}
|
@ -4,6 +4,8 @@
|
||||
#include <Access/Role.h>
|
||||
#include <Parsers/ASTGenericRoleSet.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <boost/range/algorithm/set_algorithm.hpp>
|
||||
#include <boost/range/algorithm/sort.hpp>
|
||||
#include <boost/range/algorithm_ext/push_back.hpp>
|
||||
@ -45,26 +47,51 @@ GenericRoleSet::GenericRoleSet(const boost::container::flat_set<UUID> & ids_)
|
||||
}
|
||||
|
||||
|
||||
GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const std::optional<UUID> & current_user_id)
|
||||
GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast)
|
||||
{
|
||||
init(ast, nullptr, nullptr);
|
||||
}
|
||||
|
||||
GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const UUID & current_user_id)
|
||||
{
|
||||
init(ast, nullptr, ¤t_user_id);
|
||||
}
|
||||
|
||||
GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager)
|
||||
{
|
||||
init(ast, &manager, nullptr);
|
||||
}
|
||||
|
||||
GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id)
|
||||
{
|
||||
init(ast, &manager, ¤t_user_id);
|
||||
}
|
||||
|
||||
void GenericRoleSet::init(const ASTGenericRoleSet & ast, const AccessControlManager * manager, const UUID * current_user_id)
|
||||
{
|
||||
all = ast.all;
|
||||
|
||||
auto name_to_id = [id_mode{ast.id_mode}, manager](const String & name) -> UUID
|
||||
{
|
||||
if (id_mode)
|
||||
return parse<UUID>(name);
|
||||
assert(manager);
|
||||
auto id = manager->find<User>(name);
|
||||
if (id)
|
||||
return *id;
|
||||
return manager->getID<Role>(name);
|
||||
};
|
||||
|
||||
if (!ast.names.empty() && !all)
|
||||
{
|
||||
ids.reserve(ast.names.size());
|
||||
for (const String & name : ast.names)
|
||||
{
|
||||
auto id = manager.find<User>(name);
|
||||
if (!id)
|
||||
id = manager.getID<Role>(name);
|
||||
ids.insert(*id);
|
||||
}
|
||||
ids.insert(name_to_id(name));
|
||||
}
|
||||
|
||||
if (ast.current_user && !all)
|
||||
{
|
||||
if (!current_user_id)
|
||||
throw Exception("Current user is unknown", ErrorCodes::LOGICAL_ERROR);
|
||||
assert(current_user_id);
|
||||
ids.insert(*current_user_id);
|
||||
}
|
||||
|
||||
@ -72,18 +99,12 @@ GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessContro
|
||||
{
|
||||
except_ids.reserve(ast.except_names.size());
|
||||
for (const String & except_name : ast.except_names)
|
||||
{
|
||||
auto except_id = manager.find<User>(except_name);
|
||||
if (!except_id)
|
||||
except_id = manager.getID<Role>(except_name);
|
||||
except_ids.insert(*except_id);
|
||||
}
|
||||
except_ids.insert(name_to_id(except_name));
|
||||
}
|
||||
|
||||
if (ast.except_current_user)
|
||||
{
|
||||
if (!current_user_id)
|
||||
throw Exception("Current user is unknown", ErrorCodes::LOGICAL_ERROR);
|
||||
assert(current_user_id);
|
||||
except_ids.insert(*current_user_id);
|
||||
}
|
||||
|
||||
@ -91,7 +112,52 @@ GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessContro
|
||||
ids.erase(except_id);
|
||||
}
|
||||
|
||||
std::shared_ptr<ASTGenericRoleSet> GenericRoleSet::toAST(const AccessControlManager & manager) const
|
||||
|
||||
std::shared_ptr<ASTGenericRoleSet> GenericRoleSet::toAST() const
|
||||
{
|
||||
auto ast = std::make_shared<ASTGenericRoleSet>();
|
||||
ast->id_mode = true;
|
||||
ast->all = all;
|
||||
|
||||
if (!ids.empty())
|
||||
{
|
||||
ast->names.reserve(ids.size());
|
||||
for (const UUID & id : ids)
|
||||
ast->names.emplace_back(::DB::toString(id));
|
||||
}
|
||||
|
||||
if (!except_ids.empty())
|
||||
{
|
||||
ast->except_names.reserve(except_ids.size());
|
||||
for (const UUID & except_id : except_ids)
|
||||
ast->except_names.emplace_back(::DB::toString(except_id));
|
||||
}
|
||||
|
||||
return ast;
|
||||
}
|
||||
|
||||
|
||||
String GenericRoleSet::toString() const
|
||||
{
|
||||
auto ast = toAST();
|
||||
return serializeAST(*ast);
|
||||
}
|
||||
|
||||
|
||||
Strings GenericRoleSet::toStrings() const
|
||||
{
|
||||
if (all || !except_ids.empty())
|
||||
return {toString()};
|
||||
|
||||
Strings names;
|
||||
names.reserve(ids.size());
|
||||
for (const UUID & id : ids)
|
||||
names.emplace_back(::DB::toString(id));
|
||||
return names;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<ASTGenericRoleSet> GenericRoleSet::toASTWithNames(const AccessControlManager & manager) const
|
||||
{
|
||||
auto ast = std::make_shared<ASTGenericRoleSet>();
|
||||
ast->all = all;
|
||||
@ -124,17 +190,17 @@ std::shared_ptr<ASTGenericRoleSet> GenericRoleSet::toAST(const AccessControlMana
|
||||
}
|
||||
|
||||
|
||||
String GenericRoleSet::toString(const AccessControlManager & manager) const
|
||||
String GenericRoleSet::toStringWithNames(const AccessControlManager & manager) const
|
||||
{
|
||||
auto ast = toAST(manager);
|
||||
auto ast = toASTWithNames(manager);
|
||||
return serializeAST(*ast);
|
||||
}
|
||||
|
||||
|
||||
Strings GenericRoleSet::toStrings(const AccessControlManager & manager) const
|
||||
Strings GenericRoleSet::toStringsWithNames(const AccessControlManager & manager) const
|
||||
{
|
||||
if (all || !except_ids.empty())
|
||||
return {toString(manager)};
|
||||
return {toStringWithNames(manager)};
|
||||
|
||||
Strings names;
|
||||
names.reserve(ids.size());
|
||||
|
@ -30,11 +30,19 @@ struct GenericRoleSet
|
||||
GenericRoleSet(const std::vector<UUID> & ids_);
|
||||
GenericRoleSet(const boost::container::flat_set<UUID> & ids_);
|
||||
|
||||
GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const std::optional<UUID> & current_user_id = {});
|
||||
std::shared_ptr<ASTGenericRoleSet> toAST(const AccessControlManager & manager) const;
|
||||
/// The constructor from AST requires the AccessControlManager if `ast.id_mode == false`.
|
||||
GenericRoleSet(const ASTGenericRoleSet & ast);
|
||||
GenericRoleSet(const ASTGenericRoleSet & ast, const UUID & current_user_id);
|
||||
GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager);
|
||||
GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id);
|
||||
|
||||
String toString(const AccessControlManager & manager) const;
|
||||
Strings toStrings(const AccessControlManager & manager) const;
|
||||
std::shared_ptr<ASTGenericRoleSet> toAST() const;
|
||||
String toString() const;
|
||||
Strings toStrings() const;
|
||||
|
||||
std::shared_ptr<ASTGenericRoleSet> toASTWithNames(const AccessControlManager & manager) const;
|
||||
String toStringWithNames(const AccessControlManager & manager) const;
|
||||
Strings toStringsWithNames(const AccessControlManager & manager) const;
|
||||
|
||||
bool empty() const;
|
||||
void clear();
|
||||
@ -61,6 +69,9 @@ struct GenericRoleSet
|
||||
boost::container::flat_set<UUID> ids;
|
||||
bool all = false;
|
||||
boost::container::flat_set<UUID> except_ids;
|
||||
|
||||
private:
|
||||
void init(const ASTGenericRoleSet & ast, const AccessControlManager * manager = nullptr, const UUID * current_user_id = nullptr);
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ namespace ErrorCodes
|
||||
extern const int BAD_CAST;
|
||||
extern const int ACCESS_ENTITY_NOT_FOUND;
|
||||
extern const int ACCESS_ENTITY_ALREADY_EXISTS;
|
||||
extern const int ACCESS_ENTITY_STORAGE_READONLY;
|
||||
extern const int ACCESS_STORAGE_READONLY;
|
||||
extern const int UNKNOWN_USER;
|
||||
extern const int UNKNOWN_ROLE;
|
||||
}
|
||||
@ -72,7 +72,6 @@ bool IAccessStorage::exists(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
|
||||
AccessEntityPtr IAccessStorage::tryReadBase(const UUID & id) const
|
||||
{
|
||||
try
|
||||
@ -419,7 +418,7 @@ void IAccessStorage::throwReadonlyCannotInsert(std::type_index type, const Strin
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot insert " + getTypeName(type) + " " + backQuote(name) + " to " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY);
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
|
||||
|
||||
@ -427,7 +426,7 @@ void IAccessStorage::throwReadonlyCannotUpdate(std::type_index type, const Strin
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot update " + getTypeName(type) + " " + backQuote(name) + " in " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY);
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
|
||||
|
||||
@ -435,6 +434,6 @@ void IAccessStorage::throwReadonlyCannotRemove(std::type_index type, const Strin
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot remove " + getTypeName(type) + " " + backQuote(name) + " from " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY);
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
}
|
||||
|
@ -74,6 +74,10 @@ public:
|
||||
String readName(const UUID & id) const;
|
||||
std::optional<String> tryReadName(const UUID & id) const;
|
||||
|
||||
/// Returns true if a specified entity can be inserted into this storage.
|
||||
/// This function doesn't check whether there are no entities with such name in the storage.
|
||||
bool canInsert(const AccessEntityPtr & entity) const { return canInsertImpl(entity); }
|
||||
|
||||
/// Inserts an entity to the storage. Returns ID of a new entry in the storage.
|
||||
/// Throws an exception if the specified name already exists.
|
||||
UUID insert(const AccessEntityPtr & entity);
|
||||
@ -133,6 +137,7 @@ protected:
|
||||
virtual bool existsImpl(const UUID & id) const = 0;
|
||||
virtual AccessEntityPtr readImpl(const UUID & id) const = 0;
|
||||
virtual String readNameImpl(const UUID & id) const = 0;
|
||||
virtual bool canInsertImpl(const AccessEntityPtr & entity) const = 0;
|
||||
virtual UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) = 0;
|
||||
virtual void removeImpl(const UUID & id) = 0;
|
||||
virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) = 0;
|
||||
|
@ -293,6 +293,7 @@ ext::scope_guard MemoryAccessStorage::subscribeForChangesImpl(const UUID & id, c
|
||||
|
||||
bool MemoryAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = entries.find(id);
|
||||
if (it != entries.end())
|
||||
{
|
||||
@ -305,6 +306,7 @@ bool MemoryAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
|
||||
bool MemoryAccessStorage::hasSubscriptionImpl(std::type_index type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto range = handlers_by_type.equal_range(type);
|
||||
return range.first != range.second;
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ private:
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
bool canInsertImpl(const AccessEntityPtr &) const override { return true; }
|
||||
UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override;
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
|
@ -8,6 +8,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ACCESS_ENTITY_FOUND_DUPLICATES;
|
||||
extern const int ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND;
|
||||
}
|
||||
|
||||
|
||||
@ -29,10 +30,9 @@ namespace
|
||||
|
||||
|
||||
MultipleAccessStorage::MultipleAccessStorage(
|
||||
std::vector<std::unique_ptr<Storage>> nested_storages_, size_t index_of_nested_storage_for_insertion_)
|
||||
std::vector<std::unique_ptr<Storage>> nested_storages_)
|
||||
: IAccessStorage(joinStorageNames(nested_storages_))
|
||||
, nested_storages(std::move(nested_storages_))
|
||||
, nested_storage_for_insertion(nested_storages[index_of_nested_storage_for_insertion_].get())
|
||||
, ids_cache(512 /* cache size */)
|
||||
{
|
||||
}
|
||||
@ -161,13 +161,39 @@ String MultipleAccessStorage::readNameImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
bool MultipleAccessStorage::canInsertImpl(const AccessEntityPtr & entity) const
|
||||
{
|
||||
for (const auto & nested_storage : nested_storages)
|
||||
{
|
||||
if (nested_storage->canInsert(entity))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
UUID MultipleAccessStorage::insertImpl(const AccessEntityPtr & entity, bool replace_if_exists)
|
||||
{
|
||||
auto id = replace_if_exists ? nested_storage_for_insertion->insertOrReplace(entity) : nested_storage_for_insertion->insert(entity);
|
||||
IAccessStorage * nested_storage_for_insertion = nullptr;
|
||||
for (const auto & nested_storage : nested_storages)
|
||||
{
|
||||
if (nested_storage->canInsert(entity))
|
||||
{
|
||||
nested_storage_for_insertion = nested_storage.get();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!nested_storage_for_insertion)
|
||||
{
|
||||
throw Exception(
|
||||
"Not found a storage to insert " + entity->getTypeName() + backQuote(entity->getName()),
|
||||
ErrorCodes::ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto id = replace_if_exists ? nested_storage_for_insertion->insertOrReplace(entity) : nested_storage_for_insertion->insert(entity);
|
||||
std::lock_guard lock{ids_cache_mutex};
|
||||
ids_cache.set(id, std::make_shared<Storage *>(nested_storage_for_insertion));
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
|
@ -13,7 +13,7 @@ class MultipleAccessStorage : public IAccessStorage
|
||||
public:
|
||||
using Storage = IAccessStorage;
|
||||
|
||||
MultipleAccessStorage(std::vector<std::unique_ptr<Storage>> nested_storages_, size_t index_of_nested_storage_for_insertion_ = 0);
|
||||
MultipleAccessStorage(std::vector<std::unique_ptr<Storage>> nested_storages_);
|
||||
~MultipleAccessStorage() override;
|
||||
|
||||
std::vector<UUID> findMultiple(std::type_index type, const String & name) const;
|
||||
@ -35,6 +35,7 @@ protected:
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID &id) const override;
|
||||
bool canInsertImpl(const AccessEntityPtr & entity) const override;
|
||||
UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override;
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
@ -45,7 +46,6 @@ protected:
|
||||
|
||||
private:
|
||||
std::vector<std::unique_ptr<Storage>> nested_storages;
|
||||
IAccessStorage * nested_storage_for_insertion;
|
||||
mutable LRUCache<UUID, Storage *> ids_cache;
|
||||
mutable std::mutex ids_cache_mutex;
|
||||
};
|
||||
|
@ -5,7 +5,6 @@
|
||||
#include <chrono>
|
||||
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
/** Quota for resources consumption for specific interval.
|
||||
|
@ -199,6 +199,77 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
|
||||
}
|
||||
|
||||
|
||||
void SettingsConstraints::clamp(const Settings & current_settings, SettingChange & change) const
|
||||
{
|
||||
const String & name = change.name;
|
||||
size_t setting_index = Settings::findIndex(name);
|
||||
if (setting_index == Settings::npos)
|
||||
return;
|
||||
|
||||
Field new_value = Settings::valueToCorrespondingType(setting_index, change.value);
|
||||
Field current_value = current_settings.get(setting_index);
|
||||
|
||||
/// Setting isn't checked if value wasn't changed.
|
||||
if (current_value == new_value)
|
||||
return;
|
||||
|
||||
if (!current_settings.allow_ddl && name == "allow_ddl")
|
||||
{
|
||||
change.value = current_value;
|
||||
return;
|
||||
}
|
||||
|
||||
/** The `readonly` value is understood as follows:
|
||||
* 0 - everything allowed.
|
||||
* 1 - only read queries can be made; you can not change the settings.
|
||||
* 2 - You can only do read queries and you can change the settings, except for the `readonly` setting.
|
||||
*/
|
||||
if (current_settings.readonly == 1)
|
||||
{
|
||||
change.value = current_value;
|
||||
return;
|
||||
}
|
||||
|
||||
if (current_settings.readonly > 1 && name == "readonly")
|
||||
{
|
||||
change.value = current_value;
|
||||
return;
|
||||
}
|
||||
|
||||
const Constraint * constraint = tryGetConstraint(setting_index);
|
||||
if (constraint)
|
||||
{
|
||||
if (constraint->read_only)
|
||||
{
|
||||
change.value = current_value;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!constraint->min_value.isNull() && (new_value < constraint->min_value))
|
||||
{
|
||||
if (!constraint->max_value.isNull() && (constraint->min_value > constraint->max_value))
|
||||
change.value = current_value;
|
||||
else
|
||||
change.value = constraint->min_value;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!constraint->max_value.isNull() && (new_value > constraint->max_value))
|
||||
{
|
||||
change.value = constraint->max_value;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes) const
|
||||
{
|
||||
for (auto & change : changes)
|
||||
clamp(current_settings, change);
|
||||
}
|
||||
|
||||
|
||||
SettingsConstraints::Constraint & SettingsConstraints::getConstraintRef(size_t index)
|
||||
{
|
||||
auto it = constraints_by_index.find(index);
|
||||
|
@ -85,9 +85,14 @@ public:
|
||||
|
||||
Infos getInfo() const;
|
||||
|
||||
/// Checks whether `change` violates these constraints and throws an exception if so.
|
||||
void check(const Settings & current_settings, const SettingChange & change) const;
|
||||
void check(const Settings & current_settings, const SettingsChanges & changes) const;
|
||||
|
||||
/// Checks whether `change` violates these constraints and clamps the `change` if so.
|
||||
void clamp(const Settings & current_settings, SettingChange & change) const;
|
||||
void clamp(const Settings & current_settings, SettingsChanges & changes) const;
|
||||
|
||||
/** Set multiple settings from "profile" (in server configuration file (users.xml), profiles contain groups of multiple settings).
|
||||
* The profile can also be set using the `set` functions, like the profile setting.
|
||||
*/
|
||||
|
@ -342,7 +342,7 @@ UsersConfigAccessStorage::UsersConfigAccessStorage() : IAccessStorage("users.xml
|
||||
UsersConfigAccessStorage::~UsersConfigAccessStorage() {}
|
||||
|
||||
|
||||
void UsersConfigAccessStorage::loadFromConfig(const Poco::Util::AbstractConfiguration & config)
|
||||
void UsersConfigAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::vector<std::pair<UUID, AccessEntityPtr>> all_entities;
|
||||
for (const auto & entity : parseUsers(config, getLogger()))
|
||||
|
@ -21,7 +21,7 @@ public:
|
||||
UsersConfigAccessStorage();
|
||||
~UsersConfigAccessStorage() override;
|
||||
|
||||
void loadFromConfig(const Poco::Util::AbstractConfiguration & config);
|
||||
void setConfiguration(const Poco::Util::AbstractConfiguration & config);
|
||||
|
||||
private:
|
||||
std::optional<UUID> findImpl(std::type_index type, const String & name) const override;
|
||||
@ -29,6 +29,7 @@ private:
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
bool canInsertImpl(const AccessEntityPtr &) const override { return false; }
|
||||
UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override;
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
|
@ -86,7 +86,6 @@ struct MovingAvgData
|
||||
};
|
||||
|
||||
|
||||
|
||||
template <typename T, typename Tlimit_num_elems, typename Data>
|
||||
class MovingImpl final
|
||||
: public IAggregateFunctionDataHelper<Data, MovingImpl<T, Tlimit_num_elems, Data>>
|
||||
|
@ -158,6 +158,21 @@ void ColumnAggregateFunction::ensureOwnership()
|
||||
}
|
||||
|
||||
|
||||
bool ColumnAggregateFunction::structureEquals(const IColumn & to) const
|
||||
{
|
||||
const auto * to_concrete = typeid_cast<const ColumnAggregateFunction *>(&to);
|
||||
if (!to_concrete)
|
||||
return false;
|
||||
|
||||
/// AggregateFunctions must be the same.
|
||||
|
||||
const IAggregateFunction & func_this = *func;
|
||||
const IAggregateFunction & func_to = *to_concrete->func;
|
||||
|
||||
return typeid(func_this) == typeid(func_to);
|
||||
}
|
||||
|
||||
|
||||
void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start, size_t length)
|
||||
{
|
||||
const ColumnAggregateFunction & from_concrete = assert_cast<const ColumnAggregateFunction &>(from);
|
||||
|
@ -204,6 +204,8 @@ public:
|
||||
}
|
||||
|
||||
void getExtremes(Field & min, Field & max) const override;
|
||||
|
||||
bool structureEquals(const IColumn &) const override;
|
||||
};
|
||||
|
||||
|
||||
|
@ -287,5 +287,4 @@ private:
|
||||
};
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
@ -339,7 +339,7 @@ ColumnPtr ColumnVector<T>::index(const IColumn & indexes, size_t limit) const
|
||||
template <typename T>
|
||||
ColumnPtr ColumnVector<T>::replicate(const IColumn::Offsets & offsets) const
|
||||
{
|
||||
size_t size = data.size();
|
||||
const size_t size = data.size();
|
||||
if (size != offsets.size())
|
||||
throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
|
||||
|
||||
@ -352,7 +352,7 @@ ColumnPtr ColumnVector<T>::replicate(const IColumn::Offsets & offsets) const
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
const auto span_end = res->getData().begin() + offsets[i];
|
||||
for (; it < span_end; ++it)
|
||||
for (; it != span_end; ++it)
|
||||
*it = data[i];
|
||||
}
|
||||
|
||||
|
@@ -12,6 +12,8 @@
M(BackgroundPoolTask, "Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping)") \
M(BackgroundMovePoolTask, "Number of active tasks in BackgroundProcessingPool for moves") \
M(BackgroundSchedulePoolTask, "Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.") \
M(CacheDictionaryUpdateQueueBatches, "Number of 'batches' (a set of keys) in update queue in CacheDictionaries.") \
M(CacheDictionaryUpdateQueueKeys, "Exact number of keys in update queue in CacheDictionaries.") \
M(DiskSpaceReservedForMerge, "Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts.") \
M(DistributedSend, "Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode.") \
M(QueryPreempted, "Number of queries that are stopped and waiting due to 'priority' setting.") \
|
||||
|
@@ -467,7 +467,7 @@
extern const int ACCESS_ENTITY_NOT_FOUND = 492;
extern const int ACCESS_ENTITY_ALREADY_EXISTS = 493;
extern const int ACCESS_ENTITY_FOUND_DUPLICATES = 494;
extern const int ACCESS_ENTITY_STORAGE_READONLY = 495;
extern const int ACCESS_STORAGE_READONLY = 495;
extern const int QUOTA_REQUIRES_CLIENT_KEY = 496;
extern const int ACCESS_DENIED = 497;
extern const int LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED = 498;
@@ -485,6 +485,10 @@
extern const int UNKNOWN_ROLE = 511;
extern const int SET_NON_GRANTED_ROLE = 512;
extern const int UNKNOWN_PART_TYPE = 513;
extern const int ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND = 514;
extern const int INCORRECT_ACCESS_ENTITY_DEFINITION = 515;
extern const int AUTHENTICATION_FAILED = 516;
extern const int CANNOT_ASSIGN_ALTER = 517;

extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <map>
|
||||
|
||||
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
namespace Util
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <Common/formatReadable.h>
|
||||
#include <common/likely.h>
|
||||
#include <common/logger_useful.h>
|
||||
#include <ext/singleton.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <cmath>
|
||||
@ -71,14 +70,13 @@ static void logMemoryUsage(Int64 amount)
|
||||
}
|
||||
|
||||
|
||||
|
||||
void MemoryTracker::alloc(Int64 size)
|
||||
{
|
||||
if (blocker.isCancelled())
|
||||
return;
|
||||
|
||||
/** Using memory_order_relaxed means that if allocations are done simultaneously,
|
||||
* we allow exception about memory limit exceeded to be thrown only on next allocation.
|
||||
* we allow exception about memory limit exceeded to be thrown only on next allocation.
|
||||
* So, we allow over-allocations.
|
||||
*/
|
||||
Int64 will_be = size + amount.fetch_add(size, std::memory_order_relaxed);
|
||||
@ -112,8 +110,8 @@ void MemoryTracker::alloc(Int64 size)
|
||||
if (unlikely(current_profiler_limit && will_be > current_profiler_limit))
|
||||
{
|
||||
auto no_track = blocker.cancel();
|
||||
ext::Singleton<DB::TraceCollector>()->collect(size);
|
||||
setOrRaiseProfilerLimit(current_profiler_limit + Int64(std::ceil((will_be - current_profiler_limit) / profiler_step)) * profiler_step);
|
||||
DB::TraceCollector::collect(DB::TraceType::Memory, StackTrace(), size);
|
||||
setOrRaiseProfilerLimit((will_be + profiler_step - 1) / profiler_step * profiler_step);
|
||||
}
|
||||
|
||||
if (unlikely(current_hard_limit && will_be > current_hard_limit))
|
||||
@ -212,7 +210,6 @@ void MemoryTracker::setOrRaiseHardLimit(Int64 value)
|
||||
|
||||
void MemoryTracker::setOrRaiseProfilerLimit(Int64 value)
|
||||
{
|
||||
/// This is just atomic set to maximum.
|
||||
Int64 old_value = profiler_limit.load(std::memory_order_relaxed);
|
||||
while (old_value < value && !profiler_limit.compare_exchange_weak(old_value, value))
|
||||
;
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <memory>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
#include <boost/iterator_adaptors.hpp>
|
||||
|
||||
#include <common/likely.h>
|
||||
#include <common/strong_typedef.h>
|
||||
@ -275,18 +274,11 @@ protected:
|
||||
public:
|
||||
using value_type = T;
|
||||
|
||||
/// You can not just use `typedef`, because there is ambiguity for the constructors and `assign` functions.
|
||||
struct iterator : public boost::iterator_adaptor<iterator, T*>
|
||||
{
|
||||
iterator() {}
|
||||
iterator(T * ptr_) : iterator::iterator_adaptor_(ptr_) {}
|
||||
};
|
||||
/// We cannot use boost::iterator_adaptor, because it defeats loop vectorization,
|
||||
/// see https://github.com/ClickHouse/ClickHouse/pull/9442
|
||||
|
||||
struct const_iterator : public boost::iterator_adaptor<const_iterator, const T*>
|
||||
{
|
||||
const_iterator() {}
|
||||
const_iterator(const T * ptr_) : const_iterator::iterator_adaptor_(ptr_) {}
|
||||
};
|
||||
using iterator = T *;
|
||||
using const_iterator = const T *;
|
||||
|
||||
|
||||
PODArray() {}
|
||||
|
@ -7,21 +7,51 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ProfilingScopedWriteUnlocker;
|
||||
|
||||
class ProfilingScopedWriteRWLock
|
||||
{
|
||||
public:
|
||||
ProfilingScopedWriteRWLock(std::shared_mutex & rwl, ProfileEvents::Event event) :
|
||||
friend class ProfilingScopedWriteUnlocker;
|
||||
|
||||
ProfilingScopedWriteRWLock(std::shared_mutex & rwl_, ProfileEvents::Event event_) :
|
||||
watch(),
|
||||
scoped_write_lock(rwl)
|
||||
event(event_),
|
||||
scoped_write_lock(rwl_)
|
||||
{
|
||||
ProfileEvents::increment(event, watch.elapsed());
|
||||
}
|
||||
|
||||
private:
|
||||
Stopwatch watch;
|
||||
ProfileEvents::Event event;
|
||||
std::unique_lock<std::shared_mutex> scoped_write_lock;
|
||||
};
|
||||
|
||||
/// Inverted RAII:
/// temporarily releases the current write lock for various purposes.
|
||||
class ProfilingScopedWriteUnlocker
|
||||
{
|
||||
public:
|
||||
ProfilingScopedWriteUnlocker() = delete;
|
||||
|
||||
ProfilingScopedWriteUnlocker(ProfilingScopedWriteRWLock & parent_lock_) : parent_lock(parent_lock_)
|
||||
{
|
||||
parent_lock.scoped_write_lock.unlock();
|
||||
}
|
||||
|
||||
~ProfilingScopedWriteUnlocker()
|
||||
{
|
||||
Stopwatch watch;
|
||||
parent_lock.scoped_write_lock.lock();
|
||||
ProfileEvents::increment(parent_lock.event, watch.elapsed());
|
||||
}
|
||||
|
||||
private:
|
||||
ProfilingScopedWriteRWLock & parent_lock;
|
||||
};
|
||||
|
||||
class ProfilingScopedReadRWLock
|
||||
{
|
||||
public:
|
||||
|
@ -9,22 +9,48 @@
|
||||
#include <common/config_common.h>
|
||||
#include <common/logger_useful.h>
|
||||
#include <common/phdr_cache.h>
|
||||
#include <ext/singleton.h>
|
||||
|
||||
#include <random>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event QueryProfilerSignalOverruns;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace
|
||||
{
|
||||
thread_local size_t write_trace_iteration = 0;
|
||||
|
||||
void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context)
|
||||
{
|
||||
int overrun_count = 0;
|
||||
auto saved_errno = errno; /// We must restore previous value of errno in signal handler.
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
if (info)
|
||||
overrun_count = info->si_overrun;
|
||||
{
|
||||
int overrun_count = info->si_overrun;
|
||||
|
||||
/// Quickly drop if signal handler is called too frequently.
|
||||
/// Otherwise we may end up infinitely processing signals instead of doing any useful work.
|
||||
++write_trace_iteration;
|
||||
if (overrun_count)
|
||||
{
|
||||
/// But pass with some frequency to avoid drop of all traces.
|
||||
if (write_trace_iteration % overrun_count == 0)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::QueryProfilerSignalOverruns, overrun_count);
|
||||
}
|
||||
else
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::QueryProfilerSignalOverruns, overrun_count + 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
UNUSED(info);
|
||||
#endif
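The check above keeps roughly one of every overrun_count overrun signals; a tiny standalone illustration (not part of the diff, with a constant overrun_count for simplicity):

#include <cstddef>
#include <iostream>

int main()
{
    size_t write_trace_iteration = 0;
    const int overrun_count = 3;   /// pretend the kernel reports 3 missed signals every time
    for (int signal = 1; signal <= 9; ++signal)
    {
        ++write_trace_iteration;
        if (write_trace_iteration % overrun_count == 0)
            std::cout << "signal " << signal << ": collected\n";   /// signals 3, 6 and 9 pass through
        else
            std::cout << "signal " << signal << ": dropped\n";
    }
    return 0;
}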
|
||||
@ -32,10 +58,12 @@ namespace
|
||||
const auto signal_context = *reinterpret_cast<ucontext_t *>(context);
|
||||
const StackTrace stack_trace(signal_context);
|
||||
|
||||
ext::Singleton<TraceCollector>()->collect(trace_type, stack_trace, overrun_count);
|
||||
TraceCollector::collect(trace_type, stack_trace, 0);
|
||||
|
||||
errno = saved_errno;
|
||||
}
|
||||
|
||||
[[maybe_unused]] const UInt32 TIMER_PRECISION = 1e9;
|
||||
[[maybe_unused]] constexpr UInt32 TIMER_PRECISION = 1e9;
|
||||
}
|
||||
|
||||
namespace ErrorCodes
|
||||
@ -77,7 +105,7 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const UInt64 thread_id, const
|
||||
|
||||
try
|
||||
{
|
||||
struct sigevent sev;
|
||||
struct sigevent sev {};
|
||||
sev.sigev_notify = SIGEV_THREAD_ID;
|
||||
sev.sigev_signo = pause_signal;
|
||||
|
||||
@ -152,7 +180,7 @@ QueryProfilerReal::QueryProfilerReal(const UInt64 thread_id, const UInt32 period
|
||||
|
||||
void QueryProfilerReal::signalHandler(int sig, siginfo_t * info, void * context)
|
||||
{
|
||||
writeTraceInfo(TraceType::REAL_TIME, sig, info, context);
|
||||
writeTraceInfo(TraceType::Real, sig, info, context);
|
||||
}
|
||||
|
||||
QueryProfilerCpu::QueryProfilerCpu(const UInt64 thread_id, const UInt32 period)
|
||||
@ -161,7 +189,7 @@ QueryProfilerCpu::QueryProfilerCpu(const UInt64 thread_id, const UInt32 period)
|
||||
|
||||
void QueryProfilerCpu::signalHandler(int sig, siginfo_t * info, void * context)
|
||||
{
|
||||
writeTraceInfo(TraceType::CPU_TIME, sig, info, context);
|
||||
writeTraceInfo(TraceType::CPU, sig, info, context);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -109,7 +109,6 @@ struct RadixSortIdentityTransform
|
||||
};
|
||||
|
||||
|
||||
|
||||
template <typename TElement>
|
||||
struct RadixSortUIntTraits
|
||||
{
|
||||
|
133
dbms/src/Common/ThreadFuzzer.cpp
Normal file
@ -0,0 +1,133 @@
|
||||
#include <signal.h>
|
||||
#include <time.h>
|
||||
#include <sys/time.h>
|
||||
#if OS_LINUX
|
||||
#include <sys/sysinfo.h>
|
||||
#endif
|
||||
#include <sched.h>
|
||||
|
||||
#include <random>
|
||||
|
||||
#include <common/sleep.h>
|
||||
#include <common/getThreadId.h>
|
||||
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
|
||||
#include <Common/ThreadFuzzer.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int CANNOT_MANIPULATE_SIGSET;
|
||||
extern const int CANNOT_SET_SIGNAL_HANDLER;
|
||||
extern const int CANNOT_CREATE_TIMER;
|
||||
}
|
||||
|
||||
|
||||
ThreadFuzzer::ThreadFuzzer()
|
||||
{
|
||||
initConfiguration();
|
||||
if (!isEffective())
|
||||
return;
|
||||
setup();
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
static void initFromEnv(T & what, const char * name)
|
||||
{
|
||||
const char * env = getenv(name);
|
||||
if (!env)
|
||||
return;
|
||||
what = parse<T>(env);
|
||||
}
|
||||
|
||||
void ThreadFuzzer::initConfiguration()
|
||||
{
|
||||
#if OS_LINUX
|
||||
num_cpus = get_nprocs();
|
||||
#else
|
||||
(void)num_cpus;
|
||||
#endif
|
||||
|
||||
initFromEnv(cpu_time_period_us, "THREAD_FUZZER_CPU_TIME_PERIOD_US");
|
||||
if (!cpu_time_period_us)
|
||||
return;
|
||||
initFromEnv(yield_probability, "THREAD_FUZZER_YIELD_PROBABILITY");
|
||||
initFromEnv(migrate_probability, "THREAD_FUZZER_MIGRATE_PROBABILITY");
|
||||
initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY");
|
||||
initFromEnv(chaos_sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US");
|
||||
}
|
||||
|
||||
void ThreadFuzzer::signalHandler(int)
|
||||
{
|
||||
auto saved_errno = errno;
|
||||
|
||||
auto & fuzzer = ThreadFuzzer::instance();
|
||||
|
||||
if (fuzzer.yield_probability > 0
|
||||
&& std::bernoulli_distribution(fuzzer.yield_probability)(thread_local_rng))
|
||||
{
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
#if OS_LINUX
|
||||
if (fuzzer.num_cpus > 0
|
||||
&& fuzzer.migrate_probability > 0
|
||||
&& std::bernoulli_distribution(fuzzer.migrate_probability)(thread_local_rng))
|
||||
{
|
||||
int migrate_to = std::uniform_int_distribution<>(0, fuzzer.num_cpus - 1)(thread_local_rng);
|
||||
|
||||
cpu_set_t set;
|
||||
CPU_ZERO(&set);
|
||||
CPU_SET(migrate_to, &set);
|
||||
|
||||
(void)sched_setaffinity(0, sizeof(set), &set);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (fuzzer.sleep_probability > 0
|
||||
&& fuzzer.chaos_sleep_time_us > 0
|
||||
&& std::bernoulli_distribution(fuzzer.sleep_probability)(thread_local_rng))
|
||||
{
|
||||
sleepForNanoseconds(fuzzer.chaos_sleep_time_us * 1000);
|
||||
}
|
||||
|
||||
errno = saved_errno;
|
||||
}
|
||||
|
||||
void ThreadFuzzer::setup()
|
||||
{
|
||||
struct sigaction sa{};
|
||||
sa.sa_handler = signalHandler;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
|
||||
if (sigemptyset(&sa.sa_mask))
|
||||
throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
|
||||
|
||||
if (sigaddset(&sa.sa_mask, SIGPROF))
|
||||
throwFromErrno("Failed to add signal to mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
|
||||
|
||||
if (sigaction(SIGPROF, &sa, nullptr))
|
||||
throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
|
||||
|
||||
static constexpr UInt32 TIMER_PRECISION = 1000000;
|
||||
|
||||
struct timeval interval;
|
||||
interval.tv_sec = cpu_time_period_us / TIMER_PRECISION;
|
||||
interval.tv_usec = cpu_time_period_us % TIMER_PRECISION;
|
||||
|
||||
struct itimerval timer = {.it_interval = interval, .it_value = interval};
|
||||
|
||||
if (0 != setitimer(ITIMER_PROF, &timer, nullptr))
|
||||
throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER);
|
||||
}
|
||||
|
||||
|
||||
}
|
74
dbms/src/Common/ThreadFuzzer.h
Normal file
@ -0,0 +1,74 @@
|
||||
#include <cstdint>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** Allows randomizing thread scheduling and inserting various glitches across the whole program for testing purposes.
|
||||
* This is done by setting up a timer that sends the PROF signal to every thread when a certain amount of CPU time has passed.
|
||||
*
|
||||
* To initialize ThreadFuzzer, call ThreadFuzzer::instance().
|
||||
* The behaviour is controlled by environment variables:
|
||||
*
|
||||
* THREAD_FUZZER_CPU_TIME_PERIOD_US - period of signals in microseconds.
|
||||
* THREAD_FUZZER_YIELD_PROBABILITY - probability to do 'sched_yield'.
|
||||
* THREAD_FUZZER_MIGRATE_PROBABILITY - probability to set CPU affinity to random CPU core.
|
||||
* THREAD_FUZZER_SLEEP_PROBABILITY - probability to sleep.
|
||||
* THREAD_FUZZER_SLEEP_TIME_US - amount of time to sleep in microseconds.
|
||||
*
|
||||
* ThreadFuzzer will do nothing if environment variables are not set accordingly.
|
||||
*
|
||||
* The intention is to reproduce thread synchronization bugs (race conditions and deadlocks) more frequently in tests.
|
||||
* We already have tests with TSan, but TSan only covers "physical" synchronization bugs, not "logical" ones,
|
||||
* where all data is protected by synchronization primitives, but we still have race conditions.
|
||||
* Obviously, TSan cannot debug distributed synchronization bugs.
|
||||
*
|
||||
* The motivation for this tool is the observation that concurrency bugs are more likely to reproduce
|
||||
* on bad, unstable virtual machines in dirty environments.
|
||||
*
|
||||
* The idea is not new, see also:
|
||||
* https://channel9.msdn.com/blogs/peli/concurrency-fuzzing-with-cuzz
|
||||
*
|
||||
* Notes:
|
||||
* - it can also be implemented with instrumentation (example: LLVM XRay) instead of signals.
|
||||
* - it's also reasonable to insert glitches around interesting functions (example: mutex lock/unlock, starting of threads, etc.),
|
||||
* this can be done by wrapping these functions (TODO).
|
||||
* - we should also make the sleep time random.
|
||||
* - sleep obviously helps, but the effect of yield and migration is unclear.
|
||||
*/
|
||||
class ThreadFuzzer
|
||||
{
|
||||
public:
|
||||
static ThreadFuzzer & instance()
|
||||
{
|
||||
static ThreadFuzzer res;
|
||||
return res;
|
||||
}
|
||||
|
||||
bool isEffective() const
|
||||
{
|
||||
return cpu_time_period_us != 0
|
||||
&& (yield_probability > 0
|
||||
|| migrate_probability > 0
|
||||
|| (sleep_probability > 0 && chaos_sleep_time_us > 0));
|
||||
}
|
||||
|
||||
private:
|
||||
uint64_t cpu_time_period_us = 0;
|
||||
double yield_probability = 0;
|
||||
double migrate_probability = 0;
|
||||
double sleep_probability = 0;
|
||||
double chaos_sleep_time_us = 0;
|
||||
|
||||
int num_cpus = 0;
|
||||
|
||||
|
||||
ThreadFuzzer();
|
||||
|
||||
void initConfiguration();
|
||||
void setup();
|
||||
|
||||
static void signalHandler(int);
|
||||
};
|
||||
|
||||
}
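A minimal sketch of how a test binary uses this class (illustrative only; it mirrors the chaos_sanitizer test added later in this diff). Configuration comes from the environment, so the variables have to be exported before the process starts, e.g. THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 THREAD_FUZZER_SLEEP_PROBABILITY=0.1 THREAD_FUZZER_SLEEP_TIME_US=100000 ./test.

#include <Common/ThreadFuzzer.h>
#include <iostream>

int main()
{
    /// The first call constructs the singleton: it reads the environment and, if configured,
    /// installs the SIGPROF handler and the ITIMER_PROF timer for the whole process.
    auto & fuzzer = DB::ThreadFuzzer::instance();

    if (!fuzzer.isEffective())
        std::cerr << "ThreadFuzzer is not enabled.\n";   /// environment variables not set: no glitches are injected

    /// ... run the concurrent code under test; glitches are injected from the signal handler ...
    return 0;
}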
|
@ -17,11 +17,6 @@
|
||||
#include <fcntl.h>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event QueryProfilerSignalOverruns;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -30,15 +25,13 @@ namespace
|
||||
/// Normally query_id is a UUID (a string with a fixed length), but the user can provide a custom query_id.
|
||||
/// Thus an upper bound on the query_id length should be introduced to avoid buffer overflow in the signal handler.
|
||||
constexpr size_t QUERY_ID_MAX_LEN = 1024;
|
||||
|
||||
thread_local size_t write_trace_iteration = 0;
|
||||
}
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
}
|
||||
LazyPipeFDs pipe;
|
||||
|
||||
TraceCollector::TraceCollector()
|
||||
|
||||
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_)
|
||||
: trace_log(std::move(trace_log_))
|
||||
{
|
||||
pipe.open();
|
||||
|
||||
@ -51,38 +44,20 @@ TraceCollector::TraceCollector()
|
||||
thread = ThreadFromGlobalPool(&TraceCollector::run, this);
|
||||
}
|
||||
|
||||
|
||||
TraceCollector::~TraceCollector()
|
||||
{
|
||||
if (!thread.joinable())
|
||||
LOG_ERROR(&Poco::Logger::get("TraceCollector"), "TraceCollector thread is malformed and cannot be joined");
|
||||
else
|
||||
{
|
||||
stop();
|
||||
thread.join();
|
||||
}
|
||||
|
||||
pipe.close();
|
||||
}
|
||||
|
||||
void TraceCollector::collect(TraceType trace_type, const StackTrace & stack_trace, int overrun_count)
|
||||
{
|
||||
/// Quickly drop if signal handler is called too frequently.
|
||||
/// Otherwise we may end up infinitelly processing signals instead of doing any useful work.
|
||||
++write_trace_iteration;
|
||||
if (overrun_count)
|
||||
{
|
||||
/// But pass with some frequency to avoid drop of all traces.
|
||||
if (write_trace_iteration % overrun_count == 0)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::QueryProfilerSignalOverruns, overrun_count);
|
||||
}
|
||||
else
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::QueryProfilerSignalOverruns, overrun_count + 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void TraceCollector::collect(TraceType trace_type, const StackTrace & stack_trace, UInt64 size)
|
||||
{
|
||||
constexpr size_t buf_size = sizeof(char) + // TraceCollector stop flag
|
||||
8 * sizeof(char) + // maximum VarUInt length for string size
|
||||
QUERY_ID_MAX_LEN * sizeof(char) + // maximum query_id length
|
||||
@ -99,7 +74,7 @@ void TraceCollector::collect(TraceType trace_type, const StackTrace & stack_trac
|
||||
|
||||
auto thread_id = CurrentThread::get().thread_id;
|
||||
|
||||
writeChar(false, out);
|
||||
writeChar(false, out); /// true if requested to stop the collecting thread.
|
||||
writeStringBinary(query_id, out);
|
||||
|
||||
size_t stack_trace_size = stack_trace.getSize();
|
||||
@ -110,64 +85,27 @@ void TraceCollector::collect(TraceType trace_type, const StackTrace & stack_trac
|
||||
|
||||
writePODBinary(trace_type, out);
|
||||
writePODBinary(thread_id, out);
|
||||
writePODBinary(UInt64(0), out);
|
||||
|
||||
out.next();
|
||||
}
|
||||
|
||||
void TraceCollector::collect(UInt64 size)
|
||||
{
|
||||
constexpr size_t buf_size = sizeof(char) + // TraceCollector stop flag
|
||||
8 * sizeof(char) + // maximum VarUInt length for string size
|
||||
QUERY_ID_MAX_LEN * sizeof(char) + // maximum query_id length
|
||||
sizeof(UInt8) + // number of stack frames
|
||||
sizeof(StackTrace::Frames) + // collected stack trace, maximum capacity
|
||||
sizeof(TraceType) + // trace type
|
||||
sizeof(UInt64) + // thread_id
|
||||
sizeof(UInt64); // size
|
||||
char buffer[buf_size];
|
||||
WriteBufferFromFileDescriptorDiscardOnFailure out(pipe.fds_rw[1], buf_size, buffer);
|
||||
|
||||
StringRef query_id = CurrentThread::getQueryId();
|
||||
query_id.size = std::min(query_id.size, QUERY_ID_MAX_LEN);
|
||||
|
||||
auto thread_id = CurrentThread::get().thread_id;
|
||||
|
||||
writeChar(false, out);
|
||||
writeStringBinary(query_id, out);
|
||||
|
||||
const auto & stack_trace = StackTrace();
|
||||
|
||||
size_t stack_trace_size = stack_trace.getSize();
|
||||
size_t stack_trace_offset = stack_trace.getOffset();
|
||||
writeIntBinary(UInt8(stack_trace_size - stack_trace_offset), out);
|
||||
for (size_t i = stack_trace_offset; i < stack_trace_size; ++i)
|
||||
writePODBinary(stack_trace.getFrames()[i], out);
|
||||
|
||||
writePODBinary(TraceType::MEMORY, out);
|
||||
writePODBinary(thread_id, out);
|
||||
writePODBinary(size, out);
|
||||
|
||||
out.next();
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends TraceCollector stop message
|
||||
|
||||
/** Sends TraceCollector stop message
|
||||
*
|
||||
* Each sequence of data for TraceCollector thread starts with a boolean flag.
|
||||
* If this flag is true, TraceCollector must stop reading trace_pipe and exit.
|
||||
* This function sends flag with a true value to stop TraceCollector gracefully.
|
||||
*
|
||||
* NOTE: TraceCollector will NOT stop immediately as there may be some data left in the pipe
|
||||
* before the stop message.
|
||||
*/
|
||||
void TraceCollector::stop()
|
||||
{
|
||||
WriteBufferFromFileDescriptor out(pipe.fds_rw[1]);
|
||||
writeChar(true, out);
|
||||
out.next();
|
||||
thread.join();
|
||||
}
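For reference, a sketch of the reader side of this protocol as seen from the run() loop below (an assumption based on the comment above, since run() is truncated in this diff; the helpers follow the ReadHelpers style used elsewhere in the file):

/// Assumed shape of the reader loop, not the actual implementation:
while (true)
{
    char is_stop;
    readChar(is_stop, in);            /// every message starts with the stop flag
    if (is_stop)
        break;                        /// stop() wrote 'true': exit after draining the earlier messages

    std::string query_id;
    readStringBinary(query_id, in);   /// then query_id, stack frames, trace type, thread_id and size follow
    /// ... read the remaining fields and pass them to trace_log ...
}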
|
||||
|
||||
|
||||
void TraceCollector::run()
|
||||
{
|
||||
ReadBufferFromFileDescriptor in(pipe.fds_rw[0]);
|
||||
|
@ -15,28 +15,27 @@ namespace DB
|
||||
|
||||
class TraceLog;
|
||||
|
||||
enum class TraceType : UInt8
|
||||
enum class TraceType : uint8_t
|
||||
{
|
||||
REAL_TIME,
|
||||
CPU_TIME,
|
||||
MEMORY,
|
||||
Real,
|
||||
CPU,
|
||||
Memory,
|
||||
};
|
||||
|
||||
class TraceCollector
|
||||
{
|
||||
public:
|
||||
TraceCollector();
|
||||
TraceCollector(std::shared_ptr<TraceLog> trace_log_);
|
||||
~TraceCollector();
|
||||
|
||||
void setTraceLog(const std::shared_ptr<TraceLog> & trace_log_) { trace_log = trace_log_; }
|
||||
|
||||
void collect(TraceType type, const StackTrace & stack_trace, int overrun_count = 0);
|
||||
void collect(UInt64 size);
|
||||
/// Collect a stack trace. This method is signal safe.
|
||||
/// Precondition: the TraceCollector object must be created.
|
||||
/// size: for memory tracing, the amount of memory allocated; 0 for other trace types.
|
||||
static void collect(TraceType trace_type, const StackTrace & stack_trace, UInt64 size);
|
||||
|
||||
private:
|
||||
std::shared_ptr<TraceLog> trace_log;
|
||||
ThreadFromGlobalPool thread;
|
||||
LazyPipeFDs pipe;
|
||||
|
||||
void run();
|
||||
void stop();
|
||||
|
@ -109,7 +109,6 @@ struct UInt128TrivialHash
|
||||
};
|
||||
|
||||
|
||||
|
||||
/** Used for aggregation, for putting a large number of constant-length keys in a hash table.
|
||||
*/
|
||||
struct UInt256
|
||||
|
@ -88,7 +88,6 @@ using namespace DB;
|
||||
struct ZooKeeperRequest;
|
||||
|
||||
|
||||
|
||||
/** Usage scenario: look at the documentation for IKeeper class.
|
||||
*/
|
||||
class ZooKeeper : public IKeeper
|
||||
|
@ -200,4 +200,3 @@ TEST(zkutil, multi_create_sequential)
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -10,7 +10,6 @@
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
unsigned getNumberOfPhysicalCPUCores()
|
||||
{
|
||||
#if USE_CPUID
|
||||
|
@ -50,7 +50,6 @@ static bool parseNumber(const String & description, size_t l, size_t r, size_t &
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Parse a string that generates shards and replicas. Separator - one of two characters | or ,
|
||||
* depending on whether shards or replicas are generated.
|
||||
* For example:
|
||||
|
@ -75,3 +75,6 @@ target_link_libraries (stopwatch PRIVATE clickhouse_common_io)
|
||||
|
||||
add_executable (symbol_index symbol_index.cpp)
|
||||
target_link_libraries (symbol_index PRIVATE clickhouse_common_io)
|
||||
|
||||
add_executable (chaos_sanitizer chaos_sanitizer.cpp)
|
||||
target_link_libraries (chaos_sanitizer PRIVATE clickhouse_common_io)
|
||||
|
56
dbms/src/Common/tests/chaos_sanitizer.cpp
Normal file
@ -0,0 +1,56 @@
|
||||
#include <thread>
|
||||
#include <iostream>
|
||||
|
||||
#include <common/sleep.h>
|
||||
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/ThreadFuzzer.h>
|
||||
|
||||
|
||||
/** Proves that ThreadFuzzer helps to find concurrency bugs.
|
||||
*
|
||||
* for i in {1..10}; do ./chaos_sanitizer 1000000; done
|
||||
* for i in {1..10}; do THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 THREAD_FUZZER_SLEEP_PROBABILITY=0.1 THREAD_FUZZER_SLEEP_TIME_US=100000 ./chaos_sanitizer 1000000; done
|
||||
*/
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
const size_t num_iterations = argc >= 2 ? DB::parse<size_t>(argv[1]) : 1000000000;
|
||||
|
||||
std::cerr << (DB::ThreadFuzzer::instance().isEffective() ? "ThreadFuzzer is enabled.\n" : "ThreadFuzzer is not enabled.\n");
|
||||
|
||||
volatile size_t counter1 = 0;
|
||||
volatile size_t counter2 = 0;
|
||||
|
||||
/// These threads are synchronized by sleep (that's intentionally incorrect).
|
||||
|
||||
std::thread t1([&]
|
||||
{
|
||||
for (size_t i = 0; i < num_iterations; ++i)
|
||||
++counter1;
|
||||
|
||||
sleepForNanoseconds(100000000);
|
||||
|
||||
for (size_t i = 0; i < num_iterations; ++i)
|
||||
++counter2;
|
||||
});
|
||||
|
||||
std::thread t2([&]
|
||||
{
|
||||
for (size_t i = 0; i < num_iterations; ++i)
|
||||
++counter2;
|
||||
|
||||
sleepForNanoseconds(100000000);
|
||||
|
||||
for (size_t i = 0; i < num_iterations; ++i)
|
||||
++counter1;
|
||||
});
|
||||
|
||||
t1.join();
|
||||
t2.join();
|
||||
|
||||
std::cerr << "Result: " << counter1 << ", " << counter2 << "\n";
|
||||
|
||||
return 0;
|
||||
}
|
@ -7,7 +7,6 @@
|
||||
#include <Common/HashTable/HashSet.h>
|
||||
|
||||
|
||||
|
||||
int main(int, char **)
|
||||
{
|
||||
{
|
||||
|
@ -22,7 +22,6 @@ using Key = UInt64;
|
||||
using Value = UInt64;
|
||||
|
||||
|
||||
|
||||
/// Various hash functions to test
|
||||
|
||||
namespace Hashes
|
||||
@ -336,7 +335,6 @@ static void NO_INLINE testForEachMapAndHash(const Key * data, size_t size)
|
||||
}
|
||||
|
||||
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
if (argc < 2)
|
||||
|
@ -244,7 +244,6 @@ void aggregate5(Map & local_map, MapSmallLocks & global_map, Source::const_itera
|
||||
}*/
|
||||
|
||||
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
size_t n = atoi(argv[1]);
|
||||
|
@ -283,7 +283,6 @@ struct Merger
|
||||
};
|
||||
|
||||
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
size_t n = atoi(argv[1]);
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <Common/HashTable/SmallTable.h>
|
||||
|
||||
|
||||
|
||||
int main(int, char **)
|
||||
{
|
||||
{
|
||||
|
@ -358,7 +358,6 @@ void Block::setColumns(const Columns & columns)
|
||||
}
|
||||
|
||||
|
||||
|
||||
Block Block::cloneWithColumns(MutableColumns && columns) const
|
||||
{
|
||||
Block res;
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <Core/ColumnsWithTypeAndName.h>
|
||||
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
|
@ -74,4 +74,13 @@ const BlockMissingValues::RowsBitMask & BlockMissingValues::getDefaultsBitmask(s
|
||||
return none;
|
||||
}
|
||||
|
||||
bool BlockMissingValues::hasDefaultBits(size_t column_idx) const
|
||||
{
|
||||
auto it = rows_mask_by_column_id.find(column_idx);
|
||||
if (it == rows_mask_by_column_id.end())
|
||||
return false;
|
||||
|
||||
const auto & col_mask = it->second;
|
||||
return std::find(col_mask.begin(), col_mask.end(), true) != col_mask.end();
|
||||
}
|
||||
}
|
||||
|
@ -51,7 +51,10 @@ class BlockMissingValues
|
||||
public:
|
||||
using RowsBitMask = std::vector<bool>; /// a bit per row for a column
|
||||
|
||||
/// Get mask for column, column_idx is index inside corresponding block
|
||||
const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
|
||||
/// Check that we have to replace the default value in at least one of the columns
|
||||
bool hasDefaultBits(size_t column_idx) const;
|
||||
void setBit(size_t column_idx, size_t row_idx);
|
||||
bool empty() const { return rows_mask_by_column_id.empty(); }
|
||||
size_t size() const { return rows_mask_by_column_id.size(); }
|
||||
|
@ -165,4 +165,13 @@ bool NamesAndTypesList::contains(const String & name) const
|
||||
return false;
|
||||
}
|
||||
|
||||
std::optional<NameAndTypePair> NamesAndTypesList::tryGetByName(const std::string & name) const
|
||||
{
|
||||
for (const NameAndTypePair & column : *this)
|
||||
{
|
||||
if (column.name == name)
|
||||
return column;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
@ -73,7 +73,11 @@ public:
|
||||
/// Unlike `filter`, returns columns in the order in which they go in `names`.
|
||||
NamesAndTypesList addTypes(const Names & names) const;
|
||||
|
||||
/// Check that the column is contained in the list
|
||||
bool contains(const String & name) const;
|
||||
|
||||
/// Try to get a column by name; returns an empty optional if the column is not found
|
||||
std::optional<NameAndTypePair> tryGetByName(const std::string & name) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ struct Settings : public SettingsCollection<Settings>
|
||||
M(SettingUInt64, min_insert_block_size_rows, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.", 0) \
|
||||
M(SettingUInt64, min_insert_block_size_bytes, (DEFAULT_INSERT_BLOCK_SIZE * 256), "Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.", 0) \
|
||||
M(SettingUInt64, max_joined_block_size_rows, DEFAULT_BLOCK_SIZE, "Maximum block size for JOIN result (if join algorithm supports it). 0 means unlimited.", 0) \
|
||||
M(SettingUInt64, max_insert_threads, 0, "The maximum number of threads to execute the INSERT SELECT query. By default, it is determined automatically.", 0) \
|
||||
M(SettingUInt64, max_insert_threads, 0, "The maximum number of threads to execute the INSERT SELECT query. Values 0 or 1 means that INSERT SELECT is not run in parallel. Higher values will lead to higher memory usage. Parallel INSERT SELECT has effect only if the SELECT part is run on parallel, see 'max_threads' setting.", 0) \
|
||||
M(SettingMaxThreads, max_threads, 0, "The maximum number of threads to execute the request. By default, it is determined automatically.", 0) \
|
||||
M(SettingMaxThreads, max_alter_threads, 0, "The maximum number of threads to execute the ALTER requests. By default, it is determined automatically.", 0) \
|
||||
M(SettingUInt64, max_read_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the buffer to read from the filesystem.", 0) \
|
||||
@ -333,7 +333,7 @@ struct Settings : public SettingsCollection<Settings>
|
||||
M(SettingUInt64, max_memory_usage, 0, "Maximum memory usage for processing of single query. Zero means unlimited.", 0) \
|
||||
M(SettingUInt64, max_memory_usage_for_user, 0, "Maximum memory usage for processing all concurrently running queries for the user. Zero means unlimited.", 0) \
|
||||
M(SettingUInt64, max_memory_usage_for_all_queries, 0, "Maximum memory usage for processing all concurrently running queries on the server. Zero means unlimited.", 0) \
|
||||
M(SettingUInt64, memory_profiler_step, 0, "Every number of bytes the memory profiler will dump the allocating stacktrace. Zero means disabled memory profiler.", 0) \
|
||||
M(SettingUInt64, memory_profiler_step, 0, "Every number of bytes the memory profiler will collect the allocating stack trace. The minimal effective step is 4 MiB (less values will work as clamped to 4 MiB). Zero means disabled memory profiler.", 0) \
|
||||
\
|
||||
M(SettingUInt64, max_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for a query. Zero means unlimited.", 0) \
|
||||
M(SettingUInt64, max_network_bytes, 0, "The maximum number of bytes (compressed) to receive or transmit over the network for execution of the query.", 0) \
|
||||
|
@ -391,7 +391,6 @@ void SettingEnum<EnumType, Tag>::set(const Field & x)
|
||||
}
|
||||
|
||||
|
||||
|
||||
String SettingURI::toString() const
|
||||
{
|
||||
return value.toString();
|
||||
|
@ -15,7 +15,6 @@ struct TypePair
|
||||
};
|
||||
|
||||
|
||||
|
||||
template <typename T, bool _int, bool _float, bool _decimal, bool _datetime, typename F>
|
||||
bool callOnBasicType(TypeIndex number, F && f)
|
||||
{
|
||||
|
@ -1,7 +1,7 @@
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Interpreters/ExpressionActions.h>
|
||||
#include <Interpreters/evaluateMissingDefaults.h>
|
||||
#include <Interpreters/inplaceBlockConversions.h>
|
||||
#include <DataStreams/AddingDefaultsBlockInputStream.h>
|
||||
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
@ -56,11 +56,20 @@ Block AddingDefaultsBlockInputStream::readImpl()
|
||||
if (block_missing_values.empty())
|
||||
return res;
|
||||
|
||||
/// res block already has all column values, with the default value for the type
|
||||
/// (not the value specified in the table). We identify which columns we need to
|
||||
/// recalculate with the help of block_missing_values.
|
||||
Block evaluate_block{res};
|
||||
/// remove columns for recalculation
|
||||
for (const auto & column : column_defaults)
|
||||
{
|
||||
if (evaluate_block.has(column.first))
|
||||
evaluate_block.erase(column.first);
|
||||
{
|
||||
size_t column_idx = res.getPositionByName(column.first);
|
||||
if (block_missing_values.hasDefaultBits(column_idx))
|
||||
evaluate_block.erase(column.first);
|
||||
}
|
||||
}
|
||||
|
||||
if (!evaluate_block.columns())
|
||||
evaluate_block.insert({ColumnConst::create(ColumnUInt8::create(1, 0), res.rows()), std::make_shared<DataTypeUInt8>(), "_dummy"});
|
||||
|
Some files were not shown because too many files have changed in this diff.