Remove inherited create() method + disallow copying

The original motivation for this commit was that shared_ptr_helper used
std::shared_ptr<T>(new T(...)), which does two heap allocations (one for the
object, one for the control block), instead of make_shared<T>(), which does a
single allocation. It turned out that
1. the affected code (--> Storages/) is not on a hot path (rendering the
   performance argument moot ...), yet
2. copying Storage objects is potentially dangerous and was
   previously allowed.

Hence, this change

- removes shared_ptr_helper and as a result all inherited create() methods,

- instead, Storage objects are now created using make_shared<>() by the
  caller (for that to work, many constructors had to be made public), and

- all Storage classes were marked as noncopyable using boost::noncopyable.

In sum, we are (likely) not making things faster, but the code becomes
cleaner and harder to misuse.
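
For illustration, a minimal sketch of the resulting pattern (the class and member names below are made up for this example, not taken from the diff): the constructor is public, the class derives from boost::noncopyable, and the caller builds the shared_ptr with std::make_shared; the old shared_ptr_helper path is shown in comments for contrast.

#include <memory>
#include <boost/noncopyable.hpp>

// Illustrative stand-in for a Storage class after this commit:
// public constructor, copying disabled via boost::noncopyable.
struct StorageExample : private boost::noncopyable
{
    explicit StorageExample(int value) : value(value) {}
    int value;
};

int main()
{
    // Single heap allocation for the object and its control block.
    auto storage = std::make_shared<StorageExample>(42);

    // StorageExample copy = *storage;  // would not compile: copying is disallowed

    // Before this commit, the same object would have been created via the
    // inherited factory, e.g. StorageExample::create(42), which boiled down to
    // std::shared_ptr<StorageExample>(new StorageExample(42)) -- two allocations.
    return storage->value == 42 ? 0 : 1;
}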
Robert Schulze 2022-04-19 22:47:29 +02:00 committed by Robert Schulze
parent 9621d44345
commit 330212e0f4
202 changed files with 844 additions and 1055 deletions

View File

@ -1,36 +0,0 @@
#pragma once
#include <memory>
/** Allows to make std::shared_ptr from T with protected constructor.
*
* Derive your T class from shared_ptr_helper<T> and add shared_ptr_helper<T> as a friend
* and you will have static 'create' method in your class.
*/
template <typename T>
struct shared_ptr_helper
{
template <typename... TArgs>
static std::shared_ptr<T> create(TArgs &&... args)
{
return std::shared_ptr<T>(new T(std::forward<TArgs>(args)...));
}
};
template <typename T>
struct is_shared_ptr
{
static constexpr bool value = false;
};
template <typename T>
struct is_shared_ptr<std::shared_ptr<T>>
{
static constexpr bool value = true;
};
template <typename T>
inline constexpr bool is_shared_ptr_v = is_shared_ptr<T>::value;
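
For reference, a hedged sketch of how the now-removed helper was typically used, per the comment above (the Widget class is illustrative, not from ClickHouse):

#include <memory>

// Assumes the shared_ptr_helper template shown above is in scope.
class Widget : public shared_ptr_helper<Widget>
{
    // create() lives in the base class and needs access to the protected constructor.
    friend struct shared_ptr_helper<Widget>;

protected:
    explicit Widget(int x) : x(x) {}
    int x;
};

// Callers could not write std::make_shared<Widget>(1) because the constructor is
// protected; they had to go through the inherited factory, which performs
// std::shared_ptr<Widget>(new Widget(1)) -- i.e. two heap allocations.
std::shared_ptr<Widget> w = Widget::create(1);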

View File

@ -5,7 +5,6 @@
#include <Interpreters/ClientInfo.h>
#include <Core/UUID.h>
#include <base/scope_guard.h>
#include <base/shared_ptr_helper.h>
#include <boost/container/flat_set.hpp>
#include <mutex>
#include <optional>

View File

@ -1146,7 +1146,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
ConstraintsDescription{},
String{},
};
StoragePtr storage = StorageFile::create(in_file, global_context->getUserFilesPath(), args);
StoragePtr storage = std::make_shared<StorageFile>(in_file, global_context->getUserFilesPath(), args);
storage->startup();
SelectQueryInfo query_info;

View File

@ -6,7 +6,6 @@
#include <memory>
#include <string>
#include <base/shared_ptr_helper.h>
#include <Common/Exception.h>
#include <base/demangle.h>
@ -60,9 +59,26 @@ To typeid_cast(From * from)
}
}
namespace detail
{
template <typename T>
struct is_shared_ptr : std::false_type
{
};
template <typename T>
struct is_shared_ptr<std::shared_ptr<T>> : std::true_type
{
};
template <typename T>
inline constexpr bool is_shared_ptr_v = is_shared_ptr<T>::value;
}
template <typename To, typename From>
requires is_shared_ptr_v<To>
requires detail::is_shared_ptr_v<To>
To typeid_cast(const std::shared_ptr<From> & from)
{
try
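
A quick illustrative check of what the moved trait admits (these asserts are not part of the diff; they merely restate the requires-clause):

static_assert(detail::is_shared_ptr_v<std::shared_ptr<int>>);
static_assert(!detail::is_shared_ptr_v<int>);
static_assert(!detail::is_shared_ptr_v<int *>);
// So typeid_cast<std::shared_ptr<Derived>>(base_shared_ptr) resolves to this overload,
// while typeid_cast<Derived *>(raw_ptr) keeps using the raw-pointer overload above.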

View File

@ -56,7 +56,7 @@ void MySQLClient::connect()
in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
out = std::make_shared<WriteBufferFromPocoSocket>(*socket);
packet_endpoint = MySQLProtocol::PacketEndpoint::create(*in, *out, sequence_id);
packet_endpoint = std::make_shared<MySQLProtocol::PacketEndpoint>(*in, *out, sequence_id);
handshake();
}

View File

@ -56,7 +56,7 @@ private:
std::shared_ptr<WriteBuffer> out;
std::unique_ptr<Poco::Net::StreamSocket> socket;
std::optional<Poco::Net::SocketAddress> address;
std::shared_ptr<PacketEndpoint> packet_endpoint;
MySQLProtocol::PacketEndpointPtr packet_endpoint;
void handshake();
void registerSlaveOnMaster(UInt32 slave_id);

View File

@ -1,11 +1,11 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include "IMySQLReadPacket.h"
#include "IMySQLWritePacket.h"
#include "IO/MySQLPacketPayloadReadBuffer.h"
#include <base/shared_ptr_helper.h>
namespace DB
{
@ -16,13 +16,19 @@ namespace MySQLProtocol
/* Writes and reads packets, keeping sequence-id.
* Throws ProtocolError, if packet with incorrect sequence-id was received.
*/
class PacketEndpoint : public shared_ptr_helper<PacketEndpoint>
class PacketEndpoint : boost::noncopyable
{
public:
uint8_t & sequence_id;
ReadBuffer * in;
WriteBuffer * out;
/// For writing.
PacketEndpoint(WriteBuffer & out_, uint8_t & sequence_id_);
/// For reading and writing.
PacketEndpoint(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_);
MySQLPacketPayloadReadBuffer getPayload();
void receivePacket(IMySQLReadPacket & packet);
@ -43,15 +49,6 @@ public:
/// Converts packet to text. Is used for debug output.
static String packetToText(const String & payload);
protected:
/// For writing.
PacketEndpoint(WriteBuffer & out_, uint8_t & sequence_id_);
/// For reading and writing.
PacketEndpoint(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_);
friend struct shared_ptr_helper<PacketEndpoint>;
};
using PacketEndpointPtr = std::shared_ptr<PacketEndpoint>;

View File

@ -31,7 +31,7 @@ namespace
DictionaryStructure dictionary_structure = ExternalDictionariesLoader::getDictionaryStructure(*load_result.config);
auto comment = load_result.config->config->getString("dictionary.comment", "");
return StorageDictionary::create(
return std::make_shared<StorageDictionary>(
StorageID(database_name, load_result.name),
load_result.name,
dictionary_structure,

View File

@ -137,7 +137,7 @@ StoragePtr DatabaseMaterializedMySQL::tryGetTable(const String & name, ContextPt
if (context_->isInternalQuery())
return nested_storage;
if (nested_storage)
return std::make_shared<StorageMaterializedMySQL>(std::move(nested_storage), this);
return StorageMaterializedMySQL::create(std::move(nested_storage), this);
return nullptr;
}

View File

@ -23,7 +23,7 @@ public:
const StoragePtr & table() const override
{
StoragePtr storage = std::make_shared<StorageMaterializedMySQL>(nested_iterator->table(), database);
StoragePtr storage = StorageMaterializedMySQL::create(nested_iterator->table(), database);
return tables.emplace_back(storage);
}

View File

@ -94,12 +94,12 @@ void DatabaseMaterializedPostgreSQL::startSynchronization()
if (storage)
{
/// Nested table was already created and synchronized.
storage = StorageMaterializedPostgreSQL::create(storage, getContext(), remote_database_name, table_name);
storage = std::make_shared<StorageMaterializedPostgreSQL>(storage, getContext(), remote_database_name, table_name);
}
else
{
/// Nested table does not exist and will be created by replication thread.
storage = StorageMaterializedPostgreSQL::create(StorageID(database_name, table_name), getContext(), remote_database_name, table_name);
storage = std::make_shared<StorageMaterializedPostgreSQL>(StorageID(database_name, table_name), getContext(), remote_database_name, table_name);
}
/// Cache MaterializedPostgreSQL wrapper over nested table.
@ -210,7 +210,7 @@ ASTPtr DatabaseMaterializedPostgreSQL::getCreateTableQueryImpl(const String & ta
std::lock_guard lock(handler_mutex);
auto storage = StorageMaterializedPostgreSQL::create(StorageID(database_name, table_name), getContext(), remote_database_name, table_name);
auto storage = std::make_shared<StorageMaterializedPostgreSQL>(StorageID(database_name, table_name), getContext(), remote_database_name, table_name);
auto ast_storage = replication_handler->getCreateNestedTableQuery(storage.get(), table_name);
assert_cast<ASTCreateQuery *>(ast_storage.get())->uuid = UUIDHelpers::generateV4();
return ast_storage;
@ -291,7 +291,7 @@ void DatabaseMaterializedPostgreSQL::attachTable(ContextPtr context_, const Stri
InterpreterAlterQuery(alter_query, current_context).execute();
auto storage = StorageMaterializedPostgreSQL::create(table, getContext(), remote_database_name, table_name);
auto storage = std::make_shared<StorageMaterializedPostgreSQL>(table, getContext(), remote_database_name, table_name);
materialized_tables[table_name] = storage;
std::lock_guard lock(handler_mutex);

View File

@ -187,7 +187,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr,
if (!columns_info)
return StoragePtr{};
auto storage = StoragePostgreSQL::create(
auto storage = std::make_shared<StoragePostgreSQL>(
StorageID(database_name, table_name), pool, table_name,
ColumnsDescription{columns_info->columns}, ConstraintsDescription{}, String{}, configuration.schema, configuration.on_conflict);

View File

@ -145,7 +145,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca
if (!columns)
return StoragePtr{};
auto storage = StorageSQLite::create(
auto storage = std::make_shared<StorageSQLite>(
StorageID(database_name, table_name),
sqlite_db,
database_path,

View File

@ -504,7 +504,7 @@ Pipe CacheDictionary<dictionary_key_type>::read(const Names & column_names, size
}
std::shared_ptr<const IDictionary> dictionary = shared_from_this();
auto coordinator = DictionarySourceCoordinator::create(dictionary, column_names, std::move(key_columns), max_block_size);
auto coordinator = std::make_shared<DictionarySourceCoordinator>(dictionary, column_names, std::move(key_columns), max_block_size);
auto result = coordinator->read(num_streams);
return result;

View File

@ -1,5 +1,6 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <memory>
#include <Columns/IColumn.h>
#include <Core/Names.h>
@ -13,18 +14,14 @@ namespace DB
class DictionarySource;
class DictionarySourceCoordinator final : public shared_ptr_helper<DictionarySourceCoordinator>, public std::enable_shared_from_this<DictionarySourceCoordinator>
class DictionarySourceCoordinator final : public std::enable_shared_from_this<DictionarySourceCoordinator>
, private boost::noncopyable
{
friend struct shared_ptr_helper<DictionarySourceCoordinator>;
public:
using ReadColumnsFunc = std::function<Columns (const Strings &, const DataTypes &, const Columns &, const DataTypes &, const Columns &)>;
Pipe read(size_t num_streams);
private:
explicit DictionarySourceCoordinator(
std::shared_ptr<const IDictionary> dictionary_,
const Names & column_names,
@ -85,6 +82,8 @@ private:
initialize(column_names);
}
private:
friend class DictionarySource;
bool getKeyColumnsNextRangeToRead(ColumnsWithTypeAndName & key_columns, ColumnsWithTypeAndName & data_columns);

View File

@ -573,7 +573,7 @@ Pipe FlatDictionary::read(const Names & column_names, size_t max_block_size, siz
ColumnsWithTypeAndName key_columns = {ColumnWithTypeAndName(keys_column, std::make_shared<DataTypeUInt64>(), dict_struct.id->name)};
std::shared_ptr<const IDictionary> dictionary = shared_from_this();
auto coordinator = DictionarySourceCoordinator::create(dictionary, column_names, std::move(key_columns), max_block_size);
auto coordinator = std::make_shared<DictionarySourceCoordinator>(dictionary, column_names, std::move(key_columns), max_block_size);
auto result = coordinator->read(num_streams);
return result;

View File

@ -758,7 +758,7 @@ Pipe HashedArrayDictionary<dictionary_key_type>::read(const Names & column_names
}
std::shared_ptr<const IDictionary> dictionary = shared_from_this();
auto coordinator = DictionarySourceCoordinator::create(dictionary, column_names, std::move(key_columns), max_block_size);
auto coordinator = std::make_shared<DictionarySourceCoordinator>(dictionary, column_names, std::move(key_columns), max_block_size);
auto result = coordinator->read(num_streams);
return result;

View File

@ -745,7 +745,7 @@ Pipe HashedDictionary<dictionary_key_type, sparse>::read(const Names & column_na
}
std::shared_ptr<const IDictionary> dictionary = shared_from_this();
auto coordinator = DictionarySourceCoordinator::create(dictionary, column_names, std::move(key_columns), max_block_size);
auto coordinator = std::make_shared<DictionarySourceCoordinator>(dictionary, column_names, std::move(key_columns), max_block_size);
auto result = coordinator->read(num_streams);
return result;

View File

@ -873,7 +873,7 @@ Pipe IPAddressDictionary::read(const Names & column_names, size_t max_block_size
}
std::shared_ptr<const IDictionary> dictionary = shared_from_this();
auto coordinator = DictionarySourceCoordinator::create(dictionary, column_names, std::move(key_columns_with_type), std::move(view_columns), max_block_size);
auto coordinator = std::make_shared<DictionarySourceCoordinator>(dictionary, column_names, std::move(key_columns_with_type), std::move(view_columns), max_block_size);
auto result = coordinator->read(num_streams);
return result;

View File

@ -1005,7 +1005,7 @@ Pipe RangeHashedDictionary<dictionary_key_type>::read(const Names & column_names
return result;
};
auto coordinator = DictionarySourceCoordinator::create(
auto coordinator = std::make_shared<DictionarySourceCoordinator>(
dictionary,
column_names,
std::move(key_columns),

View File

@ -3,8 +3,8 @@
#include <Common/config.h>
#if USE_MINIZIP
#include <boost/noncopyable.hpp>
#include <IO/Archives/IArchiveReader.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
#include <vector>
@ -16,9 +16,16 @@ class ReadBufferFromFileBase;
class SeekableReadBuffer;
/// Implementation of IArchiveReader for reading zip archives.
class ZipArchiveReader : public shared_ptr_helper<ZipArchiveReader>, public IArchiveReader
class ZipArchiveReader : public IArchiveReader, boost::noncopyable
{
public:
/// Constructs an archive's reader that will read from a file in the local filesystem.
explicit ZipArchiveReader(const String & path_to_archive_);
/// Constructs an archive's reader that will read by making a read buffer by using
/// a specified function.
ZipArchiveReader(const String & path_to_archive_, const ReadArchiveFunction & archive_read_function_, UInt64 archive_size_);
~ZipArchiveReader() override;
/// Returns true if there is a specified file in the archive.
@ -43,14 +50,6 @@ public:
void setPassword(const String & password_) override;
private:
/// Constructs an archive's reader that will read from a file in the local filesystem.
explicit ZipArchiveReader(const String & path_to_archive_);
/// Constructs an archive's reader that will read by making a read buffer by using
/// a specified function.
ZipArchiveReader(const String & path_to_archive_, const ReadArchiveFunction & archive_read_function_, UInt64 archive_size_);
friend struct shared_ptr_helper<ZipArchiveReader>;
class ReadBufferFromZipArchive;
class FileEnumeratorImpl;
class HandleHolder;

View File

@ -3,8 +3,8 @@
#include <Common/config.h>
#if USE_MINIZIP
#include <boost/noncopyable.hpp>
#include <IO/Archives/IArchiveWriter.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
@ -14,9 +14,15 @@ class WriteBuffer;
class WriteBufferFromFileBase;
/// Implementation of IArchiveWriter for writing zip archives.
class ZipArchiveWriter : public shared_ptr_helper<ZipArchiveWriter>, public IArchiveWriter
class ZipArchiveWriter : public IArchiveWriter, boost::noncopyable
{
public:
/// Constructs an archive that will be written as a file in the local filesystem.
explicit ZipArchiveWriter(const String & path_to_archive_);
/// Constructs an archive that will be written by using a specified `archive_write_buffer_`.
ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr<WriteBuffer> archive_write_buffer_);
/// Destructor finalizes writing the archive.
~ZipArchiveWriter() override;
@ -63,13 +69,6 @@ public:
static void checkEncryptionIsEnabled();
private:
/// Constructs an archive that will be written as a file in the local filesystem.
explicit ZipArchiveWriter(const String & path_to_archive_);
/// Constructs an archive that will be written by using a specified `archive_write_buffer_`.
ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr<WriteBuffer> archive_write_buffer_);
friend struct shared_ptr_helper<ZipArchiveWriter>;
class WriteBufferFromZipArchive;
class HandleHolder;
using RawHandle = void *;

View File

@ -26,7 +26,7 @@ std::shared_ptr<IArchiveReader> createArchiveReader(
if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx"))
{
#if USE_MINIZIP
return ZipArchiveReader::create(path_to_archive, archive_read_function, archive_size);
return std::make_shared<ZipArchiveReader>(path_to_archive, archive_read_function, archive_size);
#else
throw Exception("minizip library is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
#endif

View File

@ -26,7 +26,7 @@ std::shared_ptr<IArchiveWriter> createArchiveWriter(
if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx"))
{
#if USE_MINIZIP
return ZipArchiveWriter::create(path_to_archive, std::move(archive_write_buffer));
return std::make_shared<ZipArchiveWriter>(path_to_archive, std::move(archive_write_buffer));
#else
throw Exception("minizip library is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
#endif

View File

@ -3314,7 +3314,7 @@ void Context::initializeBackgroundExecutorsIfNeeded()
background_common_pool_size = config.getUInt64("profiles.default.background_common_pool_size");
/// With this executor we can execute more tasks than threads we have
shared->merge_mutate_executor = MergeMutateBackgroundExecutor::create
shared->merge_mutate_executor = std::make_shared<MergeMutateBackgroundExecutor>
(
"MergeMutate",
/*max_threads_count*/background_pool_size,
@ -3324,7 +3324,7 @@ void Context::initializeBackgroundExecutorsIfNeeded()
LOG_INFO(shared->log, "Initialized background executor for merges and mutations with num_threads={}, num_tasks={}",
background_pool_size, background_pool_size * background_merges_mutations_concurrency_ratio);
shared->moves_executor = OrdinaryBackgroundExecutor::create
shared->moves_executor = std::make_shared<OrdinaryBackgroundExecutor>
(
"Move",
background_move_pool_size,
@ -3333,7 +3333,7 @@ void Context::initializeBackgroundExecutorsIfNeeded()
);
LOG_INFO(shared->log, "Initialized background executor for move operations with num_threads={}, num_tasks={}", background_move_pool_size, background_move_pool_size);
shared->fetch_executor = OrdinaryBackgroundExecutor::create
shared->fetch_executor = std::make_shared<OrdinaryBackgroundExecutor>
(
"Fetch",
background_fetches_pool_size,
@ -3342,7 +3342,7 @@ void Context::initializeBackgroundExecutorsIfNeeded()
);
LOG_INFO(shared->log, "Initialized background executor for fetches with num_threads={}, num_tasks={}", background_fetches_pool_size, background_fetches_pool_size);
shared->common_executor = OrdinaryBackgroundExecutor::create
shared->common_executor = std::make_shared<OrdinaryBackgroundExecutor>
(
"Common",
background_common_pool_size,

View File

@ -92,7 +92,7 @@ TemporaryTableHolder::TemporaryTableHolder(
context_,
[&](const StorageID & table_id)
{
auto storage = StorageMemory::create(table_id, ColumnsDescription{columns}, ConstraintsDescription{constraints}, String{});
auto storage = std::make_shared<StorageMemory>(table_id, ColumnsDescription{columns}, ConstraintsDescription{constraints}, String{});
if (create_for_global_subquery)
storage->delayReadForGlobalSubqueries();
@ -250,7 +250,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
/// It's definitely not the best place for this logic, but behaviour must be consistent with DatabaseMaterializedMySQL::tryGetTable(...)
if (!context_->isInternalQuery() && db_and_table.first->getEngineName() == "MaterializedMySQL")
{
db_and_table.second = std::make_shared<StorageMaterializedMySQL>(std::move(db_and_table.second), db_and_table.first.get());
db_and_table.second = StorageMaterializedMySQL::create(std::move(db_and_table.second), db_and_table.first.get());
}
#endif
return db_and_table;

View File

@ -32,7 +32,7 @@ MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header_,
for (const auto & type : data_types)
serializations.emplace_back(type->getDefaultSerialization());
packet_endpoint = MySQLProtocol::PacketEndpoint::create(out, *sequence_id);
packet_endpoint = std::make_shared<MySQLProtocol::PacketEndpoint>(out, *sequence_id);
}
void MySQLOutputFormat::setContext(ContextPtr context_)

View File

@ -420,7 +420,7 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat
/// but it will contain a single block (that is INSERT-ed into the main table).
/// InterpreterSelectQuery will do processing of alias columns.
auto local_context = Context::createCopy(context);
local_context->addViewSource(StorageValues::create(
local_context->addViewSource(std::make_shared<StorageValues>(
views_data.source_storage_id,
views_data.source_metadata_snapshot->getColumns(),
std::move(block),

View File

@ -94,7 +94,7 @@ void MySQLHandler::run()
in = std::make_shared<ReadBufferFromPocoSocket>(socket());
out = std::make_shared<WriteBufferFromPocoSocket>(socket());
packet_endpoint = MySQLProtocol::PacketEndpoint::create(*in, *out, sequence_id);
packet_endpoint = std::make_shared<MySQLProtocol::PacketEndpoint>(*in, *out, sequence_id);
try
{
@ -403,7 +403,7 @@ void MySQLHandlerSSL::finishHandshakeSSL(
in = std::make_shared<ReadBufferFromPocoSocket>(*ss);
out = std::make_shared<WriteBufferFromPocoSocket>(*ss);
sequence_id = 2;
packet_endpoint = MySQLProtocol::PacketEndpoint::create(*in, *out, sequence_id);
packet_endpoint = std::make_shared<MySQLProtocol::PacketEndpoint>(*in, *out, sequence_id);
packet_endpoint->receivePacket(packet); /// Reading HandshakeResponse from secure socket.
}

View File

@ -808,7 +808,7 @@ void registerStorageFileLog(StorageFactory & factory)
auto path = path_ast->as<ASTLiteral &>().value.safeGet<String>();
auto format = format_ast->as<ASTLiteral &>().value.safeGet<String>();
return StorageFileLog::create(
return std::make_shared<StorageFileLog>(
args.table_id,
args.getContext(),
args.columns,

View File

@ -1,5 +1,6 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Storages/FileLog/Buffer_fwd.h>
#include <Storages/FileLog/FileLogDirectoryWatcher.h>
#include <Storages/FileLog/FileLogSettings.h>
@ -8,8 +9,6 @@
#include <Storages/IStorage.h>
#include <Common/SettingsChanges.h>
#include <base/shared_ptr_helper.h>
#include <atomic>
#include <condition_variable>
#include <filesystem>
@ -26,11 +25,19 @@ namespace ErrorCodes
class FileLogDirectoryWatcher;
class StorageFileLog final : public shared_ptr_helper<StorageFileLog>, public IStorage, WithContext
class StorageFileLog final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageFileLog>;
public:
StorageFileLog(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
const String & path_,
const String & metadata_base_path_,
const String & format_name_,
std::unique_ptr<FileLogSettings> settings,
const String & comment,
bool attach);
using Files = std::vector<String>;
@ -125,18 +132,6 @@ public:
const auto & getFileLogSettings() const { return filelog_settings; }
protected:
StorageFileLog(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
const String & path_,
const String & metadata_base_path_,
const String & format_name_,
std::unique_ptr<FileLogSettings> settings,
const String & comment,
bool attach);
private:
std::unique_ptr<FileLogSettings> filelog_settings;

View File

@ -4,11 +4,11 @@
#if USE_HDFS
#include <boost/noncopyable.hpp>
#include <Processors/Sources/SourceWithProgress.h>
#include <Storages/IStorage.h>
#include <Poco/URI.h>
#include <Common/logger_useful.h>
#include <base/shared_ptr_helper.h>
namespace DB
{
@ -16,10 +16,21 @@ namespace DB
* This class represents table engine for external hdfs files.
* Read method is supported for now.
*/
class StorageHDFS final : public shared_ptr_helper<StorageHDFS>, public IStorage, WithContext
class StorageHDFS final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageHDFS>;
public:
StorageHDFS(
const String & uri_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
ContextPtr context_,
const String & compression_method_ = "",
bool distributed_processing_ = false,
ASTPtr partition_by = nullptr);
String getName() const override { return "HDFS"; }
Pipe read(
@ -57,17 +68,6 @@ public:
protected:
friend class HDFSSource;
StorageHDFS(
const String & uri_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
ContextPtr context_,
const String & compression_method_ = "",
bool distributed_processing_ = false,
ASTPtr partition_by = nullptr);
private:
std::vector<const String> uris;

View File

@ -7,8 +7,7 @@
#include <memory>
#include <optional>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Client/Connection.h>
#include <Interpreters/Cluster.h>
#include <Storages/HDFS/StorageHDFS.h>
@ -18,10 +17,18 @@ namespace DB
class Context;
class StorageHDFSCluster : public shared_ptr_helper<StorageHDFSCluster>, public IStorage
class StorageHDFSCluster : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageHDFSCluster>;
public:
StorageHDFSCluster(
String cluster_name_,
const String & uri_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & compression_method_);
std::string getName() const override { return "HDFSCluster"; }
Pipe read(const Names &, const StorageSnapshotPtr &, SelectQueryInfo &,
@ -32,16 +39,6 @@ public:
NamesAndTypesList getVirtuals() const override;
protected:
StorageHDFSCluster(
String cluster_name_,
const String & uri_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & compression_method_);
private:
String cluster_name;
String uri;

View File

@ -7,8 +7,8 @@
#include <Poco/URI.h>
#include <ThriftHiveMetastore.h>
#include <boost/noncopyable.hpp>
#include <Common/logger_useful.h>
#include <base/shared_ptr_helper.h>
#include <Interpreters/Context.h>
#include <Storages/IStorage.h>
#include <Storages/HDFS/HDFSCommon.h>
@ -23,10 +23,23 @@ class HiveSettings;
* This class represents table engine for external hdfs files.
* Read method is supported for now.
*/
class StorageHive final : public shared_ptr_helper<StorageHive>, public IStorage, WithContext
class StorageHive final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageHive>;
public:
friend class StorageHiveSource;
StorageHive(
const String & hive_metastore_url_,
const String & hive_database_,
const String & hive_table_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment_,
const ASTPtr & partition_by_ast_,
std::unique_ptr<HiveSettings> storage_settings_,
ContextPtr context_);
String getName() const override { return "Hive"; }
bool supportsIndexForIn() const override { return true; }
@ -56,20 +69,6 @@ public:
std::optional<UInt64> totalRows(const Settings & settings) const override;
std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, ContextPtr context_) const override;
protected:
friend class StorageHiveSource;
StorageHive(
const String & hive_metastore_url_,
const String & hive_database_,
const String & hive_table_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment_,
const ASTPtr & partition_by_ast_,
std::unique_ptr<HiveSettings> storage_settings_,
ContextPtr context_);
private:
using FileFormat = IHiveFile::FileFormat;
using FileInfo = HiveMetastoreClient::FileInfo;

View File

@ -842,7 +842,7 @@ void registerStorageKafka(StorageFactory & factory)
throw Exception("kafka_poll_max_batch_size can not be lower than 1", ErrorCodes::BAD_ARGUMENTS);
}
return StorageKafka::create(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name);
return std::make_shared<StorageKafka>(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name);
};
factory.registerStorage("Kafka", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, });

View File

@ -1,5 +1,6 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Core/BackgroundSchedulePool.h>
#include <Storages/IStorage.h>
#include <Storages/Kafka/Buffer_fwd.h>
@ -7,7 +8,6 @@
#include <Common/SettingsChanges.h>
#include <Poco/Semaphore.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
#include <list>
@ -28,12 +28,18 @@ struct StorageKafkaInterceptors;
/** Implements a Kafka queue table engine that can be used as a persistent queue / buffer,
* or as a basic building block for creating pipelines with a continuous insertion / ETL.
*/
class StorageKafka final : public shared_ptr_helper<StorageKafka>, public IStorage, WithContext
class StorageKafka final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageKafka>;
friend struct StorageKafkaInterceptors;
public:
StorageKafka(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
std::unique_ptr<KafkaSettings> kafka_settings_,
const String & collection_name_);
std::string getName() const override { return "Kafka"; }
bool noPushingToViews() const override { return true; }
@ -66,13 +72,6 @@ public:
NamesAndTypesList getVirtuals() const override;
Names getVirtualColumnNames() const;
HandleKafkaErrorMode getHandleKafkaErrorMode() const { return kafka_settings->kafka_handle_error_mode; }
protected:
StorageKafka(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
std::unique_ptr<KafkaSettings> kafka_settings_,
const String & collection_name_);
private:
// Configuration and state
@ -117,7 +116,7 @@ private:
std::list<std::shared_ptr<ThreadStatus>> thread_statuses;
/// Handle error mode
HandleKafkaErrorMode handle_error_mode;
// HandleKafkaErrorMode handle_error_mode; // NOLINT -- actually not used, needs checking
SettingsChanges createSettingsAdjustments();
ConsumerBufferPtr createReadBuffer(size_t consumer_number);

View File

@ -625,7 +625,7 @@ void registerStorageLiveView(StorageFactory & factory)
"Experimental LIVE VIEW feature is not enabled (the setting 'allow_experimental_live_view')",
ErrorCodes::SUPPORT_IS_DISABLED);
return StorageLiveView::create(args.table_id, args.getLocalContext(), args.query, args.columns, args.comment);
return std::make_shared<StorageLiveView>(args.table_id, args.getLocalContext(), args.query, args.columns, args.comment);
});
}

View File

@ -11,7 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Core/BackgroundSchedulePool.h>
@ -49,14 +49,20 @@ class Pipe;
using Pipes = std::vector<Pipe>;
class StorageLiveView final : public shared_ptr_helper<StorageLiveView>, public IStorage, WithContext
class StorageLiveView final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageLiveView>;
friend class LiveViewSource;
friend class LiveViewEventsSource;
friend class LiveViewSink;
public:
StorageLiveView(
const StorageID & table_id_,
ContextPtr context_,
const ASTCreateQuery & query,
const ColumnsDescription & columns,
const String & comment);
~StorageLiveView() override;
String getName() const override { return "LiveView"; }
bool isView() const override { return true; }
@ -227,13 +233,6 @@ private:
/// Must be called with mutex locked
void scheduleNextPeriodicRefresh();
StorageLiveView(
const StorageID & table_id_,
ContextPtr context_,
const ASTCreateQuery & query,
const ColumnsDescription & columns,
const String & comment);
};
}

View File

@ -3,7 +3,7 @@
#include <memory>
#include <functional>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Interpreters/StorageID.h>
namespace DB
@ -42,10 +42,9 @@ using ExecutableTaskPtr = std::shared_ptr<IExecutableTask>;
/**
* Some background operations won't be represented as coroutines (they don't want to be executed step-by-step). For this we have this wrapper.
*/
class ExecutableLambdaAdapter : public shared_ptr_helper<ExecutableLambdaAdapter>, public IExecutableTask
class ExecutableLambdaAdapter : public IExecutableTask, boost::noncopyable
{
public:
template <typename Job, typename Callback>
explicit ExecutableLambdaAdapter(
Job && job_to_execute_,

View File

@ -3,6 +3,7 @@
#include <memory>
#include <utility>
#include <boost/noncopyable.hpp>
#include <Storages/MergeTree/IExecutableTask.h>
#include <Storages/MergeTree/MergeTask.h>
#include <Storages/MergeTree/ReplicatedMergeTreeQueue.h>
@ -14,7 +15,7 @@
namespace DB
{
class MergeFromLogEntryTask : public shared_ptr_helper<MergeFromLogEntryTask>, public ReplicatedMergeMutateTaskBase
class MergeFromLogEntryTask : public ReplicatedMergeMutateTaskBase, boost::noncopyable
{
public:
template <class Callback>

View File

@ -10,8 +10,8 @@
#include <iostream>
#include <boost/circular_buffer.hpp>
#include <boost/noncopyable.hpp>
#include <base/shared_ptr_helper.h>
#include <Common/logger_useful.h>
#include <Common/ThreadPool.h>
#include <Common/Stopwatch.h>
@ -156,7 +156,7 @@ private:
* So, when a Storage wants to shut down, it must wait until all its background operations are finished.
*/
template <class Queue>
class MergeTreeBackgroundExecutor final : public shared_ptr_helper<MergeTreeBackgroundExecutor<Queue>>
class MergeTreeBackgroundExecutor final : boost::noncopyable
{
public:
MergeTreeBackgroundExecutor(

View File

@ -6185,7 +6185,7 @@ bool MergeTreeData::scheduleDataMovingJob(BackgroundJobsAssignee & assignee)
if (moving_tagger->parts_to_move.empty())
return false;
assignee.scheduleMoveTask(ExecutableLambdaAdapter::create(
assignee.scheduleMoveTask(std::make_shared<ExecutableLambdaAdapter>(
[this, moving_tagger] () mutable
{
return moveParts(moving_tagger);

View File

@ -223,7 +223,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
if (query_info.projection->merge_tree_normal_select_result_ptr)
{
auto storage_from_base_parts_of_projection
= StorageFromMergeTreeDataPart::create(data, query_info.projection->merge_tree_normal_select_result_ptr);
= std::make_shared<StorageFromMergeTreeDataPart>(data, query_info.projection->merge_tree_normal_select_result_ptr);
auto interpreter = InterpreterSelectQuery(
query_info.query,
context,

View File

@ -88,7 +88,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
}
new_part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, storage.format_version);
commands = MutationCommands::create(storage.queue.getMutationCommands(source_part, new_part_info.mutation));
commands = std::make_shared<MutationCommands>(storage.queue.getMutationCommands(source_part, new_part_info.mutation));
/// Once we mutate part, we must reserve space on the same disk, because mutations can possibly create hardlinks.
/// Can throw an exception.

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/MergeTree/IExecutableTask.h>
#include <Storages/MergeTree/MutateTask.h>
#include <Storages/MergeTree/ReplicatedMergeMutateTaskBase.h>
@ -12,7 +11,7 @@
namespace DB
{
class MutateFromLogEntryTask : public shared_ptr_helper<MutateFromLogEntryTask>, public ReplicatedMergeMutateTaskBase
class MutateFromLogEntryTask : public ReplicatedMergeMutateTaskBase, boost::noncopyable
{
public:
template <typename Callback>
@ -22,6 +21,7 @@ public:
Callback && task_result_callback_)
: ReplicatedMergeMutateTaskBase(&Poco::Logger::get("MutateFromLogEntryTask"), storage_, selected_entry_, task_result_callback_) {}
UInt64 getPriority() override { return priority; }
private:

View File

@ -1406,7 +1406,7 @@ bool MutateTask::prepare()
ctx->num_mutations = std::make_unique<CurrentMetrics::Increment>(CurrentMetrics::PartMutation);
ctx->source_part = ctx->future_part->parts[0];
auto storage_from_source_part = StorageFromMergeTreeDataPart::create(ctx->source_part);
auto storage_from_source_part = std::make_shared<StorageFromMergeTreeDataPart>(ctx->source_part);
auto context_for_reading = Context::createCopy(ctx->context);
context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1);

View File

@ -1,5 +1,6 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>
#include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
@ -9,17 +10,31 @@
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Core/Defines.h>
#include <base/shared_ptr_helper.h>
namespace DB
{
/// A Storage that allows reading from a single MergeTree data part.
class StorageFromMergeTreeDataPart final : public shared_ptr_helper<StorageFromMergeTreeDataPart>, public IStorage
class StorageFromMergeTreeDataPart final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageFromMergeTreeDataPart>;
public:
/// Used in part mutation.
explicit StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
: IStorage(getIDFromPart(part_))
, parts({part_})
, storage(part_->storage)
, partition_id(part_->info.partition_id)
{
setInMemoryMetadata(storage.getInMemoryMetadata());
}
/// Used in queries with projection.
StorageFromMergeTreeDataPart(const MergeTreeData & storage_, MergeTreeDataSelectAnalysisResultPtr analysis_result_ptr_)
: IStorage(storage_.getStorageID()), storage(storage_), analysis_result_ptr(analysis_result_ptr_)
{
setInMemoryMetadata(storage.getInMemoryMetadata());
}
String getName() const override { return "FromMergeTreeDataPart"; }
Pipe read(
@ -77,24 +92,6 @@ public:
return parts.front()->storage.getSettings()->materialize_ttl_recalculate_only;
}
protected:
/// Used in part mutation.
explicit StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
: IStorage(getIDFromPart(part_))
, parts({part_})
, storage(part_->storage)
, partition_id(part_->info.partition_id)
{
setInMemoryMetadata(storage.getInMemoryMetadata());
}
/// Used in queries with projection.
StorageFromMergeTreeDataPart(const MergeTreeData & storage_, MergeTreeDataSelectAnalysisResultPtr analysis_result_ptr_)
: IStorage(storage_.getStorageID()), storage(storage_), analysis_result_ptr(analysis_result_ptr_)
{
setInMemoryMetadata(storage.getInMemoryMetadata());
}
private:
MergeTreeData::DataPartsVector parts;
const MergeTreeData & storage;

View File

@ -672,7 +672,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS);
if (replicated)
return StorageReplicatedMergeTree::create(
return std::make_shared<StorageReplicatedMergeTree>(
zookeeper_path,
replica_name,
args.attach,
@ -686,7 +686,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
args.has_force_restore_data_flag,
renaming_restrictions);
else
return StorageMergeTree::create(
return std::make_shared<StorageMergeTree>(
args.table_id,
args.relative_data_path,
metadata,

View File

@ -64,7 +64,7 @@ TEST(Executor, RemoveTasks)
const size_t tasks_kinds = 25;
const size_t batch = 100;
auto executor = DB::OrdinaryBackgroundExecutor::create
auto executor = std::make_shared<DB::OrdinaryBackgroundExecutor>
(
"GTest",
tasks_kinds,
@ -105,7 +105,7 @@ TEST(Executor, RemoveTasksStress)
const size_t schedulers_count = 5;
const size_t removers_count = 5;
auto executor = DB::OrdinaryBackgroundExecutor::create
auto executor = std::make_shared<DB::OrdinaryBackgroundExecutor>
(
"GTest",
tasks_kinds,

View File

@ -5,7 +5,7 @@
#include <memory>
#include <unordered_map>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Parsers/ASTAlterQuery.h>
#include <Storages/IStorage_fwd.h>
#include <DataTypes/IDataType.h>
@ -70,7 +70,7 @@ struct MutationCommand
};
/// Multiple mutation commands, possible from different ALTER queries
class MutationCommands : public shared_ptr_helper<MutationCommands>, public std::vector<MutationCommand>
class MutationCommands : public std::vector<MutationCommand>
{
public:
std::shared_ptr<ASTExpressionList> ast() const;

View File

@ -147,7 +147,7 @@ StoragePtr StorageMaterializedPostgreSQL::createTemporary() const
}
auto new_context = Context::createCopy(context);
return StorageMaterializedPostgreSQL::create(tmp_table_id, new_context, "temporary", table_id.table_name);
return std::make_shared<StorageMaterializedPostgreSQL>(tmp_table_id, new_context, "temporary", table_id.table_name);
}
@ -569,7 +569,7 @@ void registerStorageMaterializedPostgreSQL(StorageFactory & factory)
if (has_settings)
postgresql_replication_settings->loadFromQuery(*args.storage_def);
return StorageMaterializedPostgreSQL::create(
return std::make_shared<StorageMaterializedPostgreSQL>(
args.table_id, args.attach, configuration.database, configuration.table, connection_info,
metadata, args.getContext(),
std::move(postgresql_replication_settings));

View File

@ -6,13 +6,13 @@
#include "PostgreSQLReplicationHandler.h"
#include "MaterializedPostgreSQLSettings.h"
#include <boost/noncopyable.hpp>
#include <Parsers/IAST.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <base/shared_ptr_helper.h>
#include <memory>
@ -61,10 +61,8 @@ namespace DB
*
**/
class StorageMaterializedPostgreSQL final : public shared_ptr_helper<StorageMaterializedPostgreSQL>, public IStorage, WithContext
class StorageMaterializedPostgreSQL final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMaterializedPostgreSQL>;
public:
StorageMaterializedPostgreSQL(const StorageID & table_id_, ContextPtr context_,
const String & postgres_database_name, const String & postgres_table_name);
@ -72,6 +70,16 @@ public:
StorageMaterializedPostgreSQL(StoragePtr nested_storage_, ContextPtr context_,
const String & postgres_database_name, const String & postgres_table_name);
StorageMaterializedPostgreSQL(
const StorageID & table_id_,
bool is_attach_,
const String & remote_database_name,
const String & remote_table_name,
const postgres::ConnectionInfo & connection_info,
const StorageInMemoryMetadata & storage_metadata,
ContextPtr context_,
std::unique_ptr<MaterializedPostgreSQLSettings> replication_settings);
String getName() const override { return "MaterializedPostgreSQL"; }
void shutdown() override;
@ -122,17 +130,6 @@ public:
bool supportsFinal() const override { return true; }
protected:
StorageMaterializedPostgreSQL(
const StorageID & table_id_,
bool is_attach_,
const String & remote_database_name,
const String & remote_table_name,
const postgres::ConnectionInfo & connection_info,
const StorageInMemoryMetadata & storage_metadata,
ContextPtr context_,
std::unique_ptr<MaterializedPostgreSQLSettings> replication_settings);
private:
static std::shared_ptr<ASTColumnDeclaration> getMaterializedColumnsDeclaration(
String name, String type, UInt64 default_value);

View File

@ -1164,7 +1164,7 @@ void registerStorageRabbitMQ(StorageFactory & factory)
if (!rabbitmq_settings->rabbitmq_format.changed)
throw Exception("You must specify `rabbitmq_format` setting", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
return StorageRabbitMQ::create(args.table_id, args.getContext(), args.columns, std::move(rabbitmq_settings), args.attach);
return std::make_shared<StorageRabbitMQ>(args.table_id, args.getContext(), args.columns, std::move(rabbitmq_settings), args.attach);
};
factory.registerStorage("RabbitMQ", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, });

View File

@ -1,9 +1,9 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Core/BackgroundSchedulePool.h>
#include <Storages/IStorage.h>
#include <Poco/Semaphore.h>
#include <base/shared_ptr_helper.h>
#include <mutex>
#include <atomic>
#include <Storages/RabbitMQ/Buffer_fwd.h>
@ -18,11 +18,16 @@
namespace DB
{
class StorageRabbitMQ final: public shared_ptr_helper<StorageRabbitMQ>, public IStorage, WithContext
class StorageRabbitMQ final: public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageRabbitMQ>;
public:
StorageRabbitMQ(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
std::unique_ptr<RabbitMQSettings> rabbitmq_settings_,
bool is_attach_);
std::string getName() const override { return "RabbitMQ"; }
bool noPushingToViews() const override { return true; }
@ -71,14 +76,6 @@ public:
void incrementReader();
void decrementReader();
protected:
StorageRabbitMQ(
const StorageID & table_id_,
ContextPtr context_,
const ColumnsDescription & columns_,
std::unique_ptr<RabbitMQSettings> rabbitmq_settings_,
bool is_attach_);
private:
ContextMutablePtr rabbitmq_context;
std::unique_ptr<RabbitMQSettings> rabbitmq_settings;

View File

@ -508,7 +508,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
{
throw Exception("StorageEmbeddedRocksDB must require one column in primary key", ErrorCodes::BAD_ARGUMENTS);
}
return StorageEmbeddedRocksDB::create(args.table_id, args.relative_data_path, metadata, args.attach, args.getContext(), primary_key_names[0]);
return std::make_shared<StorageEmbeddedRocksDB>(args.table_id, args.relative_data_path, metadata, args.attach, args.getContext(), primary_key_names[0]);
}
std::shared_ptr<rocksdb::Statistics> StorageEmbeddedRocksDB::getRocksDBStatistics() const

View File

@ -2,7 +2,7 @@
#include <memory>
#include <shared_mutex>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <rocksdb/status.h>
@ -19,11 +19,17 @@ namespace DB
class Context;
class StorageEmbeddedRocksDB final : public shared_ptr_helper<StorageEmbeddedRocksDB>, public IStorage, WithContext
class StorageEmbeddedRocksDB final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageEmbeddedRocksDB>;
friend class EmbeddedRocksDBSink;
public:
StorageEmbeddedRocksDB(const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,
ContextPtr context_,
const String & primary_key_);
std::string getName() const override { return "EmbeddedRocksDB"; }
Pipe read(
@ -53,14 +59,6 @@ public:
std::vector<rocksdb::Status> multiGet(const std::vector<rocksdb::Slice> & slices_keys, std::vector<String> & values) const;
const String & getPrimaryKey() const { return primary_key; }
protected:
StorageEmbeddedRocksDB(const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,
ContextPtr context_,
const String & primary_key_);
private:
const String primary_key;
using RocksDBPtr = std::unique_ptr<rocksdb::DB>;

View File

@ -1,6 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/System/IStorageSystemOneBlock.h>
@ -12,9 +12,8 @@ class Context;
/** Implements the `rocksdb` system table, which exposes various rocksdb metrics.
*/
class StorageSystemRocksDB final : public shared_ptr_helper<StorageSystemRocksDB>, public IStorageSystemOneBlock<StorageSystemRocksDB>
class StorageSystemRocksDB final : public IStorageSystemOneBlock<StorageSystemRocksDB>, boost::noncopyable
{
friend struct shared_ptr_helper<StorageSystemRocksDB>;
public:
std::string getName() const override { return "SystemRocksDB"; }

View File

@ -1145,7 +1145,7 @@ void registerStorageBuffer(StorageFactory & factory)
destination_id.table_name = destination_table;
}
return StorageBuffer::create(
return std::make_shared<StorageBuffer>(
args.table_id,
args.columns,
args.constraints,

View File

@ -1,9 +1,9 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Core/BackgroundSchedulePool.h>
#include <Core/NamesAndTypes.h>
#include <Storages/IStorage.h>
#include <base/shared_ptr_helper.h>
#include <Poco/Event.h>
@ -41,9 +41,8 @@ namespace DB
* When you destroy a Buffer table, all remaining data is flushed to the subordinate table.
* The data in the buffer is not replicated, not logged to disk, not indexed. With a rough restart of the server, the data is lost.
*/
class StorageBuffer final : public shared_ptr_helper<StorageBuffer>, public IStorage, WithContext
class StorageBuffer final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageBuffer>;
friend class BufferSource;
friend class BufferSink;
@ -55,6 +54,22 @@ public:
size_t bytes = 0; /// The number of (uncompressed) bytes in the block.
};
/** num_shards - the level of internal parallelism (the number of independent buffers)
* The buffer is flushed if all minimum thresholds or at least one of the maximum thresholds are exceeded.
*/
StorageBuffer(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
ContextPtr context_,
size_t num_shards_,
const Thresholds & min_thresholds_,
const Thresholds & max_thresholds_,
const Thresholds & flush_thresholds_,
const StorageID & destination_id,
bool allow_materialized_);
std::string getName() const override { return "Buffer"; }
QueryProcessingStage::Enum
@ -166,23 +181,6 @@ private:
BackgroundSchedulePool & bg_pool;
BackgroundSchedulePoolTaskHolder flush_handle;
protected:
/** num_shards - the level of internal parallelism (the number of independent buffers)
* The buffer is flushed if all minimum thresholds or at least one of the maximum thresholds are exceeded.
*/
StorageBuffer(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
ContextPtr context_,
size_t num_shards_,
const Thresholds & min_thresholds_,
const Thresholds & max_thresholds_,
const Thresholds & flush_thresholds_,
const StorageID & destination_id,
bool allow_materialized_);
};
}

View File

@ -318,7 +318,7 @@ void registerStorageDictionary(StorageFactory & factory)
/// Create dictionary storage that owns underlying dictionary
auto abstract_dictionary_configuration = getDictionaryConfigurationFromAST(args.query, local_context, dictionary_id.database_name);
auto result_storage = StorageDictionary::create(dictionary_id, abstract_dictionary_configuration, local_context);
auto result_storage = std::make_shared<StorageDictionary>(dictionary_id, abstract_dictionary_configuration, local_context);
bool lazy_load = local_context->getConfigRef().getBool("dictionaries_lazy_load", true);
if (!args.attach && !lazy_load)
@ -348,7 +348,7 @@ void registerStorageDictionary(StorageFactory & factory)
checkNamesAndTypesCompatibleWithDictionary(dictionary_name, args.columns, dictionary_structure);
}
return StorageDictionary::create(
return std::make_shared<StorageDictionary>(
args.table_id, dictionary_name, args.columns, args.comment, StorageDictionary::Location::Custom, local_context);
}
});

View File

@ -1,8 +1,8 @@
#pragma once
#include <atomic>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Interpreters/IExternalLoaderConfigRepository.h>
@ -12,11 +12,50 @@ namespace DB
struct DictionaryStructure;
class TableFunctionDictionary;
class StorageDictionary final : public shared_ptr_helper<StorageDictionary>, public IStorage, public WithContext
class StorageDictionary final : public IStorage, public WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageDictionary>;
friend class TableFunctionDictionary;
friend class TableFunctionDictionary;
public:
/// Specifies where the table is located relative to the dictionary.
enum class Location
{
/// Table was created automatically as an element of a database with the Dictionary engine.
DictionaryDatabase,
/// Table was created automatically along with a dictionary
/// and has the same database and name as the dictionary.
/// It provides table-like access to the dictionary.
/// User cannot drop that table.
SameDatabaseAndNameAsDictionary,
/// Table was created explicitly by a statement like
/// CREATE TABLE ... ENGINE=Dictionary
/// User chose the table's database and name and can drop that table.
Custom,
};
StorageDictionary(
const StorageID & table_id_,
const String & dictionary_name_,
const ColumnsDescription & columns_,
const String & comment,
Location location_,
ContextPtr context_);
StorageDictionary(
const StorageID & table_id_,
const String & dictionary_name_,
const DictionaryStructure & dictionary_structure,
const String & comment,
Location location_,
ContextPtr context_);
StorageDictionary(
const StorageID & table_id_,
LoadablesConfigurationPtr dictionary_configuration_,
ContextPtr context_);
std::string getName() const override { return "Dictionary"; }
~StorageDictionary() override;
@ -51,24 +90,6 @@ public:
String getDictionaryName() const { return dictionary_name; }
/// Specifies where the table is located relative to the dictionary.
enum class Location
{
/// Table was created automatically as an element of a database with the Dictionary engine.
DictionaryDatabase,
/// Table was created automatically along with a dictionary
/// and has the same database and name as the dictionary.
/// It provides table-like access to the dictionary.
/// User cannot drop that table.
SameDatabaseAndNameAsDictionary,
/// Table was created explicitly by a statement like
/// CREATE TABLE ... ENGINE=Dictionary
/// User chose the table's database and name and can drop that table.
Custom,
};
private:
String dictionary_name;
const Location location;
@ -80,27 +101,6 @@ private:
scope_guard remove_repository_callback;
void removeDictionaryConfigurationFromRepository();
StorageDictionary(
const StorageID & table_id_,
const String & dictionary_name_,
const ColumnsDescription & columns_,
const String & comment,
Location location_,
ContextPtr context_);
StorageDictionary(
const StorageID & table_id_,
const String & dictionary_name_,
const DictionaryStructure & dictionary_structure,
const String & comment,
Location location_,
ContextPtr context_);
StorageDictionary(
const StorageID & table_id_,
LoadablesConfigurationPtr dictionary_configuration_,
ContextPtr context_);
};
}

View File

@ -1498,7 +1498,7 @@ void registerStorageDistributed(StorageFactory & factory)
if (!distributed_settings.monitor_max_sleep_time_ms.changed)
distributed_settings.monitor_max_sleep_time_ms = Poco::Timespan(context->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms);
return StorageDistributed::create(
return std::make_shared<StorageDistributed>(
args.table_id,
args.columns,
args.constraints,

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/Distributed/DirectoryMonitor.h>
#include <Storages/Distributed/DistributedSettings.h>
@ -36,14 +35,44 @@ using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
* You can pass one address, not several.
* In this case, the table can be considered remote, rather than distributed.
*/
class StorageDistributed final : public shared_ptr_helper<StorageDistributed>, public IStorage, WithContext
class StorageDistributed final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageDistributed>;
friend class DistributedSink;
friend class StorageDistributedDirectoryMonitor;
friend class StorageSystemDistributionQueue;
public:
StorageDistributed(
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
const String & remote_database_,
const String & remote_table_,
const String & cluster_name_,
ContextPtr context_,
const ASTPtr & sharding_key_,
const String & storage_policy_name_,
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach_,
ClusterPtr owned_cluster_ = {},
ASTPtr remote_table_function_ptr_ = {});
StorageDistributed(
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
ASTPtr remote_table_function_ptr_,
const String & cluster_name_,
ContextPtr context_,
const ASTPtr & sharding_key_,
const String & storage_policy_name_,
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach,
ClusterPtr owned_cluster_ = {});
~StorageDistributed() override;
std::string getName() const override { return "Distributed"; }
@ -137,37 +166,6 @@ public:
size_t getShardCount() const;
private:
StorageDistributed(
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
const String & remote_database_,
const String & remote_table_,
const String & cluster_name_,
ContextPtr context_,
const ASTPtr & sharding_key_,
const String & storage_policy_name_,
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach_,
ClusterPtr owned_cluster_ = {},
ASTPtr remote_table_function_ptr_ = {});
StorageDistributed(
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
ASTPtr remote_table_function_ptr_,
const String & cluster_name_,
ContextPtr context_,
const ASTPtr & sharding_key_,
const String & storage_policy_name_,
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach,
ClusterPtr owned_cluster_ = {});
void renameOnDisk(const String & new_path_to_table_data);
const ExpressionActionsPtr & getShardingKeyExpr() const { return sharding_key_expr; }
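
A small illustration of what the added boost::noncopyable base does, with a hypothetical StorageBar standing in for a storage class. Note that in the class declarations above the base is listed without an access specifier, which for a class means private inheritance — the form Boost's own documentation uses for this utility.

#include <boost/noncopyable.hpp>
#include <memory>

class StorageBar final : boost::noncopyable   // no access specifier: private inheritance
{
public:
    explicit StorageBar(int shard_count_) : shard_count(shard_count_) {}
    int shard_count;
};

int main()
{
    auto storage = std::make_shared<StorageBar>(4);
    // StorageBar copy(*storage);       // does not compile: copy constructor is inaccessible
    // StorageBar other(1);
    // other = *storage;                // does not compile: copy assignment is inaccessible
    return storage->shard_count == 4 ? 0 : 1;
}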

View File

@ -217,7 +217,7 @@ void registerStorageExecutable(StorageFactory & factory)
settings.loadFromQuery(*args.storage_def);
auto global_context = args.getContext()->getGlobalContext();
return StorageExecutable::create(args.table_id, format, settings, input_queries, columns, constraints);
return std::make_shared<StorageExecutable>(args.table_id, format, settings, input_queries, columns, constraints);
};
StorageFactory::StorageFeatures storage_features;

View File

@ -1,7 +1,7 @@
#pragma once
#include <boost/noncopyable.hpp>
#include <Common/logger_useful.h>
#include <base/shared_ptr_helper.h>
#include <Storages/IStorage.h>
#include <Processors/Sources/ShellCommandSource.h>
#include <Storages/ExecutableSettings.h>
@ -15,11 +15,16 @@ namespace DB
* Executable storage that will start process for read.
* ExecutablePool storage maintain pool of processes and take process from pool for read.
*/
class StorageExecutable final : public shared_ptr_helper<StorageExecutable>, public IStorage
class StorageExecutable final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageExecutable>;
public:
StorageExecutable(
const StorageID & table_id,
const String & format,
const ExecutableSettings & settings,
const std::vector<ASTPtr> & input_queries,
const ColumnsDescription & columns,
const ConstraintsDescription & constraints);
String getName() const override
{
@ -38,16 +43,6 @@ public:
size_t max_block_size,
unsigned threads) override;
protected:
StorageExecutable(
const StorageID & table_id,
const String & format,
const ExecutableSettings & settings,
const std::vector<ASTPtr> & input_queries,
const ColumnsDescription & columns,
const ConstraintsDescription & constraints);
private:
ExecutableSettings settings;
std::vector<ASTPtr> input_queries;

View File

@ -97,7 +97,7 @@ StorageExternalDistributed::StorageExternalDistributed(
context->getSettingsRef().postgresql_connection_pool_size,
context->getSettingsRef().postgresql_connection_pool_wait_timeout);
shard = StoragePostgreSQL::create(table_id_, std::move(pool), configuration.table, columns_, constraints_, String{});
shard = std::make_shared<StoragePostgreSQL>(table_id_, std::move(pool), configuration.table, columns_, constraints_, String{});
break;
}
#endif
@ -257,7 +257,7 @@ void registerStorageExternalDistributed(StorageFactory & factory)
auto format_settings = StorageURL::getFormatSettingsFromArgs(args);
return StorageExternalDistributed::create(
return std::make_shared<StorageExternalDistributed>(
cluster_description,
args.table_id,
configuration.format,
@ -304,7 +304,7 @@ void registerStorageExternalDistributed(StorageFactory & factory)
}
return StorageExternalDistributed::create(
return std::make_shared<StorageExternalDistributed>(
args.table_id,
table_engine,
cluster_description,

View File

@ -2,7 +2,7 @@
#include "config_core.h"
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
@ -16,10 +16,8 @@ struct ExternalDataSourceConfiguration;
/// A query to external database is passed to one replica on each shard, the result is united.
/// Replicas on each shard have the same priority, traversed replicas are moved to the end of the queue.
/// Similar approach is used for URL storage.
class StorageExternalDistributed final : public shared_ptr_helper<StorageExternalDistributed>, public DB::IStorage
class StorageExternalDistributed final : public DB::IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageExternalDistributed>;
public:
enum class ExternalStorageEngine
{
@ -28,18 +26,6 @@ public:
URL
};
std::string getName() const override { return "ExternalDistributed"; }
Pipe read(
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
ContextPtr context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
protected:
StorageExternalDistributed(
const StorageID & table_id_,
ExternalStorageEngine table_engine,
@ -60,6 +46,17 @@ protected:
const ConstraintsDescription & constraints,
ContextPtr context);
std::string getName() const override { return "ExternalDistributed"; }
Pipe read(
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
ContextPtr context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
private:
using Shards = std::unordered_set<StoragePtr>;
Shards shards;

View File

@ -1091,7 +1091,7 @@ void registerStorageFile(StorageFactory & factory)
}
if (engine_args_ast.size() == 1) /// Table in database
return StorageFile::create(factory_args.relative_data_path, storage_args);
return std::make_shared<StorageFile>(factory_args.relative_data_path, storage_args);
/// Will use FD if engine_args[1] is int literal or identifier with std* name
int source_fd = -1;
@ -1131,9 +1131,9 @@ void registerStorageFile(StorageFactory & factory)
storage_args.compression_method = "auto";
if (0 <= source_fd) /// File descriptor
return StorageFile::create(source_fd, storage_args);
return std::make_shared<StorageFile>(source_fd, storage_args);
else /// User's file
return StorageFile::create(source_path, factory_args.getContext()->getUserFilesPath(), storage_args);
return std::make_shared<StorageFile>(source_path, factory_args.getContext()->getUserFilesPath(), storage_args);
},
storage_features);
}

View File

@ -3,21 +3,42 @@
#include <Storages/IStorage.h>
#include <Common/logger_useful.h>
#include <boost/noncopyable.hpp>
#include <atomic>
#include <shared_mutex>
#include <base/shared_ptr_helper.h>
namespace DB
{
class StorageFile final : public shared_ptr_helper<StorageFile>, public IStorage
class StorageFile final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageFile>;
friend class PartitionedStorageFileSink;
public:
struct CommonArguments : public WithContext
{
StorageID table_id;
std::string format_name;
std::optional<FormatSettings> format_settings;
std::string compression_method;
const ColumnsDescription & columns;
const ConstraintsDescription & constraints;
const String & comment;
};
/// From file descriptor
StorageFile(int table_fd_, CommonArguments args);
/// From user's file
StorageFile(const std::string & table_path_, const std::string & user_files_path, CommonArguments args);
/// From table in database
StorageFile(const std::string & relative_table_dir_path, CommonArguments args);
explicit StorageFile(CommonArguments args);
std::string getName() const override { return "File"; }
Pipe read(
@ -45,17 +66,6 @@ public:
bool storesDataOnDisk() const override;
Strings getDataPaths() const override;
struct CommonArguments : public WithContext
{
StorageID table_id;
std::string format_name;
std::optional<FormatSettings> format_settings;
std::string compression_method;
const ColumnsDescription & columns;
const ConstraintsDescription & constraints;
const String & comment;
};
NamesAndTypesList getVirtuals() const override;
static Strings getPathsList(const String & table_path, const String & user_files_path, ContextPtr context, size_t & total_bytes_to_read);
@ -81,18 +91,7 @@ protected:
friend class StorageFileSource;
friend class StorageFileSink;
/// From file descriptor
StorageFile(int table_fd_, CommonArguments args);
/// From user's file
StorageFile(const std::string & table_path_, const std::string & user_files_path, CommonArguments args);
/// From table in database
StorageFile(const std::string & relative_table_dir_path, CommonArguments args);
private:
explicit StorageFile(CommonArguments args);
void setStorageMetadata(CommonArguments args);
std::string format_name;
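
One thing worth noting about the CommonArguments struct above: columns, constraints and comment are stored as const references, so whoever builds the struct has to keep the referenced objects alive until the constructor has consumed them. A hedged, self-contained sketch with hypothetical Columns/Args/Table types (not the ClickHouse API):

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

struct Columns { std::vector<std::string> names; };

struct Args
{
    const Columns & columns;       // reference members: the caller owns the real objects
    const std::string & comment;
};

struct Table
{
    explicit Table(Args a) : column_count(a.columns.names.size()), comment(a.comment) {}
    std::size_t column_count;
    std::string comment;
};

int main()
{
    Columns cols{{"id", "value"}};   // must outlive the construction below
    std::string comment = "demo";
    auto table = std::make_shared<Table>(Args{cols, comment});
    return table->column_count == 2 ? 0 : 1;
}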

View File

@ -480,7 +480,7 @@ void registerStorageGenerateRandom(StorageFactory & factory)
if (engine_args.size() == 3)
max_array_length = engine_args[2]->as<const ASTLiteral &>().value.safeGet<UInt64>();
return StorageGenerateRandom::create(args.table_id, args.columns, args.comment, max_array_length, max_string_length, random_seed);
return std::make_shared<StorageGenerateRandom>(args.table_id, args.columns, args.comment, max_array_length, max_string_length, random_seed);
});
}

View File

@ -1,7 +1,7 @@
#pragma once
#include <optional>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
@ -9,10 +9,17 @@ namespace DB
{
/* Generates random data for given schema.
*/
class StorageGenerateRandom final : public shared_ptr_helper<StorageGenerateRandom>, public IStorage
class StorageGenerateRandom final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageGenerateRandom>;
public:
StorageGenerateRandom(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
UInt64 max_array_length,
UInt64 max_string_length,
std::optional<UInt64> random_seed);
std::string getName() const override { return "GenerateRandom"; }
Pipe read(
@ -29,15 +36,6 @@ private:
UInt64 max_array_length = 10;
UInt64 max_string_length = 10;
UInt64 random_seed = 0;
protected:
StorageGenerateRandom(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
UInt64 max_array_length,
UInt64 max_string_length,
std::optional<UInt64> random_seed);
};
}

View File

@ -1,18 +1,19 @@
#pragma once
#include <Storages/IStorage.h>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <QueryPipeline/Pipe.h>
#include <Storages/IStorage.h>
namespace DB
{
/** Internal temporary storage for table function input(...)
*/
class StorageInput final : public shared_ptr_helper<StorageInput>, public IStorage
class StorageInput final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageInput>;
public:
StorageInput(const StorageID & table_id, const ColumnsDescription & columns_);
String getName() const override { return "Input"; }
/// A table will read from this stream.
@ -29,8 +30,5 @@ public:
private:
Pipe pipe;
protected:
StorageInput(const StorageID & table_id, const ColumnsDescription & columns_);
};
}

View File

@ -337,7 +337,7 @@ void registerStorageJoin(StorageFactory & factory)
key_names.push_back(*opt_key);
}
return StorageJoin::create(
return std::make_shared<StorageJoin>(
disk,
args.relative_data_path,
args.table_id,

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Common/RWLock.h>
#include <Storages/StorageSet.h>
#include <Storages/TableLockHolder.h>
@ -23,10 +22,24 @@ using HashJoinPtr = std::shared_ptr<HashJoin>;
*
* When using, JOIN must be of the appropriate type (ANY|ALL LEFT|INNER ...).
*/
class StorageJoin final : public shared_ptr_helper<StorageJoin>, public StorageSetOrJoinBase
class StorageJoin final : public StorageSetOrJoinBase, boost::noncopyable
{
friend struct shared_ptr_helper<StorageJoin>;
public:
StorageJoin(
DiskPtr disk_,
const String & relative_path_,
const StorageID & table_id_,
const Names & key_names_,
bool use_nulls_,
SizeLimits limits_,
ASTTableJoin::Kind kind_,
ASTTableJoin::Strictness strictness_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
bool overwrite,
bool persistent_);
String getName() const override { return "Join"; }
void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) override;
@ -98,22 +111,6 @@ private:
void finishInsert() override {}
size_t getSize(ContextPtr context) const override;
RWLockImpl::LockHolder tryLockTimedWithContext(const RWLock & lock, RWLockImpl::Type type, ContextPtr context) const;
protected:
StorageJoin(
DiskPtr disk_,
const String & relative_path_,
const StorageID & table_id_,
const Names & key_names_,
bool use_nulls_,
SizeLimits limits_,
ASTTableJoin::Kind kind_,
ASTTableJoin::Strictness strictness_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
bool overwrite,
bool persistent_);
};
}

View File

@ -1089,7 +1089,7 @@ void registerStorageLog(StorageFactory & factory)
String disk_name = getDiskName(*args.storage_def);
DiskPtr disk = args.getContext()->getDisk(disk_name);
return StorageLog::create(
return std::make_shared<StorageLog>(
args.engine_name,
disk,
args.relative_data_path,

View File

@ -2,8 +2,8 @@
#include <map>
#include <shared_mutex>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Disks/IDisk.h>
#include <Storages/IStorage.h>
#include <Common/FileChecker.h>
@ -19,14 +19,28 @@ namespace DB
* Also implements TinyLog - a table engine that is suitable for small chunks of the log.
* It differs from Log in the absence of mark files.
*/
class StorageLog final : public shared_ptr_helper<StorageLog>, public IStorage
class StorageLog final : public IStorage, boost::noncopyable
{
friend class LogSource;
friend class LogSink;
friend class LogRestoreTask;
friend struct shared_ptr_helper<StorageLog>;
public:
/** Attach the table with the appropriate name, along the appropriate path (with / at the end),
* (the correctness of names and paths is not verified)
* consisting of the specified columns; Create files if they do not exist.
*/
StorageLog(
const String & engine_name_,
DiskPtr disk_,
const std::string & relative_path_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
bool attach,
size_t max_compress_block_size_);
~StorageLog() override;
String getName() const override { return engine_name; }
@ -56,22 +70,6 @@ public:
BackupEntries backupData(ContextPtr context, const ASTs & partitions) override;
RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination) override;
protected:
/** Attach the table with the appropriate name, along the appropriate path (with / at the end),
* (the correctness of names and paths is not verified)
* consisting of the specified columns; Create files if they do not exist.
*/
StorageLog(
const String & engine_name_,
DiskPtr disk_,
const std::string & relative_path_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
bool attach,
size_t max_compress_block_size_);
private:
using ReadLock = std::shared_lock<std::shared_timed_mutex>;
using WriteLock = std::unique_lock<std::shared_timed_mutex>;

View File

@ -4,6 +4,7 @@
#if USE_MYSQL
#include <boost/noncopyable.hpp>
#include <Storages/StorageProxy.h>
namespace DB
@ -14,14 +15,13 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
class StorageMaterializedMySQL final : public shared_ptr_helper<StorageMaterializedMySQL>, public StorageProxy
class StorageMaterializedMySQL final : public StorageProxy, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMaterializedMySQL>;
public:
String getName() const override { return "MaterializedMySQL"; }
StorageMaterializedMySQL(const StoragePtr & nested_storage_, const IDatabase * database_);
String getName() const override { return "MaterializedMySQL"; }
bool needRewriteQueryWithFinal(const Names & column_names) const override;
Pipe read(

View File

@ -462,7 +462,7 @@ void registerStorageMaterializedView(StorageFactory & factory)
factory.registerStorage("MaterializedView", [](const StorageFactory::Arguments & args)
{
/// Pass local_context here to convey setting for inner table
return StorageMaterializedView::create(
return std::make_shared<StorageMaterializedView>(
args.table_id, args.getLocalContext(), args.query,
args.columns, args.attach, args.comment);
});

View File

@ -1,9 +1,8 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <Parsers/IAST_fwd.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/StorageInMemoryMetadata.h>
@ -11,10 +10,17 @@
namespace DB
{
class StorageMaterializedView final : public shared_ptr_helper<StorageMaterializedView>, public IStorage, WithMutableContext
class StorageMaterializedView final : public IStorage, WithMutableContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMaterializedView>;
public:
StorageMaterializedView(
const StorageID & table_id_,
ContextPtr local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns_,
bool attach_,
const String & comment);
std::string getName() const override { return "MaterializedView"; }
bool isView() const override { return true; }
@ -109,15 +115,6 @@ private:
bool has_inner_table = false;
void checkStatementCanBeForwarded() const;
protected:
StorageMaterializedView(
const StorageID & table_id_,
ContextPtr local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns_,
bool attach_,
const String & comment);
};
}

View File

@ -1,6 +1,7 @@
#include <cassert>
#include <Common/Exception.h>
#include <boost/noncopyable.hpp>
#include <Interpreters/MutationsInterpreter.h>
#include <Interpreters/getColumnFromBlock.h>
#include <Interpreters/inplaceBlockConversions.h>
@ -377,11 +378,9 @@ void StorageMemory::truncate(
}
class MemoryBackupEntriesBatch : public shared_ptr_helper<MemoryBackupEntriesBatch>, public IBackupEntriesBatch
class MemoryBackupEntriesBatch : public IBackupEntriesBatch, boost::noncopyable
{
private:
friend struct shared_ptr_helper<MemoryBackupEntriesBatch>;
public:
MemoryBackupEntriesBatch(
const StorageMetadataPtr & metadata_snapshot_, const std::shared_ptr<const Blocks> blocks_, UInt64 max_compress_block_size_)
: IBackupEntriesBatch({"data.bin", "index.mrk", "sizes.json"})
@ -391,6 +390,7 @@ private:
{
}
private:
static constexpr const size_t kDataBinPos = 0;
static constexpr const size_t kIndexMrkPos = 1;
static constexpr const size_t kSizesJsonPos = 2;
@ -475,7 +475,7 @@ BackupEntries StorageMemory::backupData(ContextPtr context, const ASTs & partiti
if (!partitions.empty())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName());
return MemoryBackupEntriesBatch::create(getInMemoryMetadataPtr(), data.get(), context->getSettingsRef().max_compress_block_size)
return std::make_shared<MemoryBackupEntriesBatch>(getInMemoryMetadataPtr(), data.get(), context->getSettingsRef().max_compress_block_size)
->getBackupEntries();
}
@ -589,7 +589,7 @@ void registerStorageMemory(StorageFactory & factory)
if (has_settings)
settings.loadFromQuery(*args.storage_def);
return StorageMemory::create(args.table_id, args.columns, args.constraints, args.comment, settings.compress);
return std::make_shared<StorageMemory>(args.table_id, args.columns, args.constraints, args.comment, settings.compress);
},
{
.supports_settings = true,
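
For the backupData() change a few hunks above, a brief sketch (hypothetical Batch type, simplified) of why chaining a call on the freshly created shared_ptr is fine: the temporary shared_ptr returned by std::make_shared lives until the end of the full expression, and the result is returned by value.

#include <memory>
#include <vector>

struct Batch
{
    explicit Batch(std::vector<int> data_) : data(std::move(data_)) {}
    std::vector<int> entries() const { return data; }   // copied out before the Batch dies
    std::vector<int> data;
};

std::vector<int> backupData()
{
    return std::make_shared<Batch>(std::vector<int>{1, 2, 3})->entries();
}

int main() { return backupData().size() == 3 ? 0 : 1; }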

View File

@ -4,8 +4,7 @@
#include <optional>
#include <mutex>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Core/NamesAndTypes.h>
#include <Storages/IStorage.h>
@ -19,13 +18,19 @@ namespace DB
* It does not support keys.
* Data is stored as a set of blocks and is not stored anywhere else.
*/
class StorageMemory final : public shared_ptr_helper<StorageMemory>, public IStorage
class StorageMemory final : public IStorage, boost::noncopyable
{
friend class MemorySink;
friend class MemoryRestoreTask;
friend struct shared_ptr_helper<StorageMemory>;
public:
StorageMemory(
const StorageID & table_id_,
ColumnsDescription columns_description_,
ConstraintsDescription constraints_,
const String & comment,
bool compress_ = false);
String getName() const override { return "Memory"; }
size_t getSize() const { return data.get()->size(); }
@ -123,14 +128,6 @@ private:
std::atomic<size_t> total_size_rows = 0;
bool compress;
protected:
StorageMemory(
const StorageID & table_id_,
ColumnsDescription columns_description_,
ConstraintsDescription constraints_,
const String & comment,
bool compress_ = false);
};
}

View File

@ -854,7 +854,7 @@ void registerStorageMerge(StorageFactory & factory)
engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.getLocalContext());
String table_name_regexp = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
return StorageMerge::create(
return std::make_shared<StorageMerge>(
args.table_id, args.columns, args.comment, source_database_name_or_regexp, is_regexp, table_name_regexp, args.getContext());
},
{

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Common/OptimizedRegularExpression.h>
#include <Storages/IStorage.h>
@ -12,10 +11,29 @@ namespace DB
/** A table that represents the union of an arbitrary number of other tables.
* All tables must have the same structure.
*/
class StorageMerge final : public shared_ptr_helper<StorageMerge>, public IStorage, WithContext
class StorageMerge final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMerge>;
public:
using DBToTableSetMap = std::map<String, std::set<String>>;
StorageMerge(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
const String & source_database_name_or_regexp_,
bool database_is_regexp_,
const DBToTableSetMap & source_databases_and_tables_,
ContextPtr context_);
StorageMerge(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
const String & source_database_name_or_regexp_,
bool database_is_regexp_,
const String & source_table_regexp_,
ContextPtr context_);
std::string getName() const override { return "Merge"; }
bool isRemote() const override;
@ -54,8 +72,6 @@ public:
static std::tuple<bool /* is_regexp */, ASTPtr> evaluateDatabaseName(const ASTPtr & node, ContextPtr context);
private:
using DBToTableSetMap = std::map<String, std::set<String>>;
std::optional<OptimizedRegularExpression> source_database_regexp;
std::optional<OptimizedRegularExpression> source_table_regexp;
std::optional<DBToTableSetMap> source_databases_and_tables;
@ -88,24 +104,6 @@ private:
ColumnSizeByName getColumnSizes() const override;
protected:
StorageMerge(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
const String & source_database_name_or_regexp_,
bool database_is_regexp_,
const DBToTableSetMap & source_databases_and_tables_,
ContextPtr context_);
StorageMerge(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & comment,
const String & source_database_name_or_regexp_,
bool database_is_regexp_,
const String & source_table_regexp_,
ContextPtr context_);
struct AliasData
{
String name;

View File

@ -889,7 +889,7 @@ std::shared_ptr<MergeMutateSelectedEntry> StorageMergeTree::selectPartsToMerge(
getContext()->getMergeList().bookMergeWithTTL();
merging_tagger = std::make_unique<CurrentlyMergingPartsTagger>(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part->parts), *this, metadata_snapshot, false);
return std::make_shared<MergeMutateSelectedEntry>(future_part, std::move(merging_tagger), MutationCommands::create());
return std::make_shared<MergeMutateSelectedEntry>(future_part, std::move(merging_tagger), std::make_shared<MutationCommands>());
}
bool StorageMergeTree::merge(
@ -1012,7 +1012,7 @@ std::shared_ptr<MergeMutateSelectedEntry> StorageMergeTree::selectPartsToMutate(
continue;
}
auto commands = MutationCommands::create();
auto commands = std::make_shared<MutationCommands>();
size_t current_ast_elements = 0;
auto last_mutation_to_apply = mutations_end_it;
for (auto it = mutations_begin_it; it != mutations_end_it; ++it)
@ -1183,7 +1183,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign
if (auto lock = time_after_previous_cleanup_temporary_directories.compareAndRestartDeferred(
getSettings()->merge_tree_clear_old_temporary_directories_interval_seconds))
{
assignee.scheduleCommonTask(ExecutableLambdaAdapter::create(
assignee.scheduleCommonTask(std::make_shared<ExecutableLambdaAdapter>(
[this, share_lock] ()
{
return clearOldTemporaryDirectories(getSettings()->temporary_directories_lifetime.totalSeconds());
@ -1193,7 +1193,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign
if (auto lock = time_after_previous_cleanup_parts.compareAndRestartDeferred(
getSettings()->merge_tree_clear_old_parts_interval_seconds))
{
assignee.scheduleCommonTask(ExecutableLambdaAdapter::create(
assignee.scheduleCommonTask(std::make_shared<ExecutableLambdaAdapter>(
[this, share_lock] ()
{
/// All use relative_data_path which changes during rename
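
The scheduling hunks above wrap lambdas into shared task objects. A rough, hypothetical sketch of that shape (Scheduler and LambdaTask are made-up names, not the ClickHouse interfaces): the lambda is moved into a heap-allocated task via std::make_shared and the scheduler keeps it alive through the shared_ptr.

#include <functional>
#include <memory>
#include <vector>

struct LambdaTask
{
    explicit LambdaTask(std::function<bool()> job_) : job(std::move(job_)) {}
    bool execute() { return job(); }
    std::function<bool()> job;
};

struct Scheduler
{
    void scheduleCommonTask(std::shared_ptr<LambdaTask> task) { tasks.push_back(std::move(task)); }
    void runAll() { for (auto & task : tasks) task->execute(); }
    std::vector<std::shared_ptr<LambdaTask>> tasks;
};

int main()
{
    Scheduler assignee;
    bool cleaned = false;
    assignee.scheduleCommonTask(std::make_shared<LambdaTask>([&] { cleaned = true; return true; }));
    assignee.runAll();
    return cleaned ? 0 : 1;
}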

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Core/Names.h>
#include <Storages/AlterCommands.h>
#include <Storages/IStorage.h>
@ -26,13 +25,30 @@ namespace DB
/** See the description of the data structure in MergeTreeData.
*/
class StorageMergeTree final : public shared_ptr_helper<StorageMergeTree>, public MergeTreeData
class StorageMergeTree final : public MergeTreeData, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMergeTree>;
public:
/** Attach the table with the appropriate name, along the appropriate path (with / at the end),
* (correctness of names and paths are not checked)
* consisting of the specified columns.
*
* See MergeTreeData constructor for comments on parameters.
*/
StorageMergeTree(
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,
ContextMutablePtr context_,
const String & date_column_name,
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag);
void startup() override;
void flush() override;
void shutdown() override;
~StorageMergeTree() override;
std::string getName() const override { return merging_params.getModeName() + "MergeTree"; }
@ -257,23 +273,6 @@ private:
protected:
/** Attach the table with the appropriate name, along the appropriate path (with / at the end),
* (correctness of names and paths are not checked)
* consisting of the specified columns.
*
* See MergeTreeData constructor for comments on parameters.
*/
StorageMergeTree(
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,
ContextMutablePtr context_,
const String & date_column_name,
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag);
MutationCommands getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const override;
};

View File

@ -39,7 +39,7 @@ StorageMongoDB::StorageMongoDB(
const String & comment)
: IStorage(table_id_)
, host(host_)
, port(port_)
, database_name(database_name_)
, collection_name(collection_name_)
, username(username_)
@ -168,7 +168,7 @@ void registerStorageMongoDB(StorageFactory & factory)
{
auto configuration = StorageMongoDB::getConfiguration(args.engine_args, args.getLocalContext());
return StorageMongoDB::create(
return std::make_shared<StorageMongoDB>(
args.table_id,
configuration.host,
configuration.port,

View File

@ -2,12 +2,10 @@
#include <Poco/MongoDB/Connection.h>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/ExternalDataSourceConfiguration.h>
namespace DB
{
/* Implements storage in the MongoDB database.
@ -15,9 +13,8 @@ namespace DB
* Read only.
*/
class StorageMongoDB final : public shared_ptr_helper<StorageMongoDB>, public IStorage
class StorageMongoDB final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMongoDB>;
public:
StorageMongoDB(
const StorageID & table_id_,
@ -49,7 +46,7 @@ private:
void connectIfNotConnected();
const std::string host;
const uint16_t port; /// NOLINT
const std::string database_name;
const std::string collection_name;
const std::string username;

View File

@ -4,8 +4,7 @@
#if USE_MYSQL
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/MySQL/MySQLSettings.h>
#include <Storages/ExternalDataSourceConfiguration.h>
@ -23,9 +22,8 @@ namespace DB
* Use ENGINE = mysql(host_port, database_name, table_name, user_name, password)
* Read only.
*/
class StorageMySQL final : public shared_ptr_helper<StorageMySQL>, public IStorage, WithContext
class StorageMySQL final : public IStorage, WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageMySQL>;
public:
StorageMySQL(
const StorageID & table_id_,

View File

@ -29,7 +29,7 @@ void registerStorageNull(StorageFactory & factory)
"Engine " + args.engine_name + " doesn't support any arguments (" + toString(args.engine_args.size()) + " given)",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
return StorageNull::create(args.table_id, args.columns, args.constraints, args.comment);
return std::make_shared<StorageNull>(args.table_id, args.columns, args.constraints, args.comment);
},
{
.supports_parallel_insert = true,

View File

@ -1,7 +1,6 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Core/NamesAndTypes.h>
#include <Storages/IStorage.h>
#include <Processors/Sources/NullSource.h>
@ -15,10 +14,20 @@ namespace DB
/** When writing, does nothing.
* When reading, returns nothing.
*/
class StorageNull final : public shared_ptr_helper<StorageNull>, public IStorage
class StorageNull final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageNull>;
public:
StorageNull(
const StorageID & table_id_, ColumnsDescription columns_description_, ConstraintsDescription constraints_, const String & comment)
: IStorage(table_id_)
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_description_);
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
setInMemoryMetadata(storage_metadata);
}
std::string getName() const override { return "Null"; }
Pipe read(
@ -54,19 +63,6 @@ public:
return {0};
}
private:
protected:
StorageNull(
const StorageID & table_id_, ColumnsDescription columns_description_, ConstraintsDescription constraints_, const String & comment)
: IStorage(table_id_)
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_description_);
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
setInMemoryMetadata(storage_metadata);
}
};
}

View File

@ -451,7 +451,7 @@ void registerStoragePostgreSQL(StorageFactory & factory)
args.getContext()->getSettingsRef().postgresql_connection_pool_size,
args.getContext()->getSettingsRef().postgresql_connection_pool_wait_timeout);
return StoragePostgreSQL::create(
return std::make_shared<StoragePostgreSQL>(
args.table_id,
std::move(pool),
configuration.table,

View File

@ -3,7 +3,7 @@
#include "config_core.h"
#if USE_LIBPQXX
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Interpreters/Context.h>
#include <Storages/IStorage.h>
#include <Core/PostgreSQL/PoolWithFailover.h>
@ -17,9 +17,8 @@ class Logger;
namespace DB
{
class StoragePostgreSQL final : public shared_ptr_helper<StoragePostgreSQL>, public IStorage
class StoragePostgreSQL final : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StoragePostgreSQL>;
public:
StoragePostgreSQL(
const StorageID & table_id_,

View File

@ -3042,7 +3042,7 @@ bool StorageReplicatedMergeTree::scheduleDataProcessingJob(BackgroundJobsAssigne
/// Depending on entry type execute in fetches (small) pool or big merge_mutate pool
if (job_type == LogEntry::GET_PART)
{
assignee.scheduleFetchTask(ExecutableLambdaAdapter::create(
assignee.scheduleFetchTask(std::make_shared<ExecutableLambdaAdapter>(
[this, selected_entry] () mutable
{
return processQueueEntry(selected_entry);
@ -3051,19 +3051,19 @@ bool StorageReplicatedMergeTree::scheduleDataProcessingJob(BackgroundJobsAssigne
}
else if (job_type == LogEntry::MERGE_PARTS)
{
auto task = MergeFromLogEntryTask::create(selected_entry, *this, common_assignee_trigger);
auto task = std::make_shared<MergeFromLogEntryTask>(selected_entry, *this, common_assignee_trigger);
assignee.scheduleMergeMutateTask(task);
return true;
}
else if (job_type == LogEntry::MUTATE_PART)
{
auto task = MutateFromLogEntryTask::create(selected_entry, *this, common_assignee_trigger);
auto task = std::make_shared<MutateFromLogEntryTask>(selected_entry, *this, common_assignee_trigger);
assignee.scheduleMergeMutateTask(task);
return true;
}
else
{
assignee.scheduleCommonTask(ExecutableLambdaAdapter::create(
assignee.scheduleCommonTask(std::make_shared<ExecutableLambdaAdapter>(
[this, selected_entry] () mutable
{
return processQueueEntry(selected_entry);

View File

@ -1,9 +1,9 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <base/UUID.h>
#include <atomic>
#include <pcg_random.hpp>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
@ -81,10 +81,32 @@ namespace DB
* as the time will take the time of creation the appropriate part on any of the replicas.
*/
class StorageReplicatedMergeTree final : public shared_ptr_helper<StorageReplicatedMergeTree>, public MergeTreeData
class StorageReplicatedMergeTree final : public MergeTreeData, boost::noncopyable
{
friend struct shared_ptr_helper<StorageReplicatedMergeTree>;
public:
enum RenamingRestrictions
{
ALLOW_ANY,
ALLOW_PRESERVING_UUID,
DO_NOT_ALLOW,
};
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
*/
StorageReplicatedMergeTree(
const String & zookeeper_path_,
const String & replica_name_,
bool attach,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata_,
ContextMutablePtr context_,
const String & date_column_name,
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag,
RenamingRestrictions renaming_restrictions_);
void startup() override;
void shutdown() override;
void flush() override;
@ -143,13 +165,6 @@ public:
void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr query_context, TableExclusiveLockHolder &) override;
enum RenamingRestrictions
{
ALLOW_ANY,
ALLOW_PRESERVING_UUID,
DO_NOT_ALLOW,
};
void checkTableCanBeRenamed(const StorageID & new_name) const override;
void rename(const String & new_path_to_table_data, const StorageID & new_table_id) override;
@ -793,23 +808,6 @@ private:
/// Create ephemeral lock in zookeeper for part and disk which support zero copy replication.
/// If somebody already holding the lock -- return std::nullopt.
std::optional<ZeroCopyLock> tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) override;
protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
*/
StorageReplicatedMergeTree(
const String & zookeeper_path_,
const String & replica_name_,
bool attach,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata_,
ContextMutablePtr context_,
const String & date_column_name,
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag,
RenamingRestrictions renaming_restrictions_);
};
String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, const MergeTreePartInfo & part_info);
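
A side note on the StorageReplicatedMergeTree hunk above: the RenamingRestrictions enum moves to the top of the public section, presumably because the constructor that now sits there takes it as a parameter, and inside a class a nested type must be declared before it is named in a member declaration's parameter list. A tiny hypothetical example of that ordering rule:

struct Table
{
    enum RenamingRestrictions { ALLOW_ANY, DO_NOT_ALLOW };   // must be declared first

    explicit Table(RenamingRestrictions restrictions_) : restrictions(restrictions_) {}

    RenamingRestrictions restrictions;
};

int main()
{
    return Table(Table::ALLOW_ANY).restrictions == Table::ALLOW_ANY ? 0 : 1;
}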

View File

@ -1018,7 +1018,7 @@ void registerStorageS3Impl(const String & name, StorageFactory & factory)
if (args.storage_def->partition_by)
partition_by = args.storage_def->partition_by->clone();
return StorageS3::create(
return std::make_shared<StorageS3>(
s3_uri,
configuration.auth_settings.access_key_id,
configuration.auth_settings.secret_access_key,

View File

@ -11,10 +11,10 @@
#include <Storages/IStorage.h>
#include <Storages/StorageS3Settings.h>
#include <boost/noncopyable.hpp>
#include <Processors/Sources/SourceWithProgress.h>
#include <Poco/URI.h>
#include <Common/logger_useful.h>
#include <base/shared_ptr_helper.h>
#include <IO/S3Common.h>
#include <IO/CompressionMethod.h>
#include <Interpreters/Context.h>
@ -120,7 +120,7 @@ private:
* It sends HTTP GET to server when select is called and
* HTTP PUT when insert is called.
*/
class StorageS3 : public shared_ptr_helper<StorageS3>, public IStorage, WithContext
class StorageS3 : public IStorage, WithContext, boost::noncopyable
{
public:
StorageS3(

View File

@ -7,8 +7,7 @@
#include <memory>
#include <optional>
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include "Client/Connection.h"
#include <Interpreters/Cluster.h>
#include <IO/S3Common.h>
@ -19,21 +18,9 @@ namespace DB
class Context;
class StorageS3Cluster : public shared_ptr_helper<StorageS3Cluster>, public IStorage
class StorageS3Cluster : public IStorage, boost::noncopyable
{
friend struct shared_ptr_helper<StorageS3Cluster>;
public:
std::string getName() const override { return "S3Cluster"; }
Pipe read(const Names &, const StorageSnapshotPtr &, SelectQueryInfo &,
ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, unsigned /*num_streams*/) override;
QueryProcessingStage::Enum
getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override;
NamesAndTypesList getVirtuals() const override;
protected:
StorageS3Cluster(
const String & filename_,
const String & access_key_id_,
@ -46,6 +33,16 @@ protected:
ContextPtr context_,
const String & compression_method_);
std::string getName() const override { return "S3Cluster"; }
Pipe read(const Names &, const StorageSnapshotPtr &, SelectQueryInfo &,
ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, unsigned /*num_streams*/) override;
QueryProcessingStage::Enum
getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override;
NamesAndTypesList getVirtuals() const override;
private:
StorageS3::S3Configuration s3_configuration;

View File

@ -172,7 +172,7 @@ void registerStorageSQLite(StorageFactory & factory)
auto sqlite_db = openSQLiteDB(database_path, args.getContext(), /* throw_on_error */!args.attach);
return StorageSQLite::create(args.table_id, sqlite_db, database_path,
return std::make_shared<StorageSQLite>(args.table_id, sqlite_db, database_path,
table_name, args.columns, args.constraints, args.getContext());
},
{

View File

@ -3,7 +3,7 @@
#include "config_core.h"
#if USE_SQLITE
#include <base/shared_ptr_helper.h>
#include <boost/noncopyable.hpp>
#include <Storages/IStorage.h>
#include <sqlite3.h>
@ -16,10 +16,8 @@ class Logger;
namespace DB
{
class StorageSQLite final : public shared_ptr_helper<StorageSQLite>, public IStorage, public WithContext
class StorageSQLite final : public IStorage, public WithContext, boost::noncopyable
{
friend struct shared_ptr_helper<StorageSQLite>;
public:
using SQLitePtr = std::shared_ptr<sqlite3>;

View File

@ -258,7 +258,7 @@ void registerStorageSet(StorageFactory & factory)
set_settings.loadFromQuery(*args.storage_def);
DiskPtr disk = args.getContext()->getDisk(set_settings.disk);
return StorageSet::create(
return std::make_shared<StorageSet>(
disk, args.relative_data_path, args.table_id, args.columns, args.constraints, args.comment, set_settings.persistent);
}, StorageFactory::StorageFeatures{ .supports_settings = true, });
}

Some files were not shown because too many files have changed in this diff.