Make some classes 'final'

This commit is contained in:
Robert Schulze 2023-02-02 15:47:00 +00:00
parent 57f26f8c7e
commit 3472ed4f0d
No known key found for this signature in database
GPG Key ID: 26703B55FB13728A
17 changed files with 17 additions and 17 deletions

View File

@@ -11,7 +11,7 @@
namespace DB
{
class CatBoostLibraryBridgeHelper : public LibraryBridgeHelper
class CatBoostLibraryBridgeHelper final : public LibraryBridgeHelper
{
public:
static constexpr inline auto PING_HANDLER = "/catboost_ping";

View File

@@ -14,7 +14,7 @@ namespace DB
class Pipe;
// Class to access the external dictionary part of the clickhouse-library-bridge.
class ExternalDictionaryLibraryBridgeHelper : public LibraryBridgeHelper
class ExternalDictionaryLibraryBridgeHelper final : public LibraryBridgeHelper
{
public:

View File

@@ -17,7 +17,7 @@ namespace DB
* Disadvantages:
* - in case you need to read a lot of data in a row, but only a part of it is cached, you have to do seek-and-read repeatedly.
*/
class CachedCompressedReadBuffer : public CompressedReadBufferBase, public ReadBuffer
class CachedCompressedReadBuffer final : public CompressedReadBufferBase, public ReadBuffer
{
private:
std::function<std::unique_ptr<ReadBufferFromFileBase>()> file_in_creator;

View File

@@ -11,7 +11,7 @@ namespace DB
/** A buffer for reading from a compressed file with just checking checksums of
* the compressed blocks, without any decompression.
*/
class CheckingCompressedReadBuffer : public CompressedReadBufferBase, public ReadBuffer
class CheckingCompressedReadBuffer final : public CompressedReadBufferBase, public ReadBuffer
{
protected:
bool nextImpl() override;

View File

@@ -8,7 +8,7 @@
namespace DB
{
class CompressedReadBuffer : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
class CompressedReadBuffer final : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
{
private:
size_t size_compressed = 0;

View File

@@ -14,7 +14,7 @@ class MMappedFileCache;
/// Unlike CompressedReadBuffer, it can do seek.
class CompressedReadBufferFromFile : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
class CompressedReadBufferFromFile final : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
{
private:
/** At any time, one of two things is true:

View File

@@ -13,7 +13,7 @@
namespace DB
{
class CompressedWriteBuffer : public BufferWithOwnMemory<WriteBuffer>
class CompressedWriteBuffer final : public BufferWithOwnMemory<WriteBuffer>
{
public:
explicit CompressedWriteBuffer(

View File

@@ -87,7 +87,7 @@ private:
Poco::Logger * log;
};
class CompressionCodecDeflateQpl : public ICompressionCodec
class CompressionCodecDeflateQpl final : public ICompressionCodec
{
public:
CompressionCodecDeflateQpl();

View File

@@ -44,7 +44,7 @@ enum EncryptionMethod
* as otherwise our engines like ReplicatedMergeTree cannot
* deduplicate data blocks.
*/
class CompressionCodecEncrypted : public ICompressionCodec
class CompressionCodecEncrypted final : public ICompressionCodec
{
public:
/** If a key is available, the server is supposed to

View File

@@ -8,7 +8,7 @@
namespace DB
{
class CompressionCodecNone : public ICompressionCodec
class CompressionCodecNone final : public ICompressionCodec
{
public:
CompressionCodecNone();

View File

@@ -13,7 +13,7 @@ namespace DB
* Doesn't do any heavy calculations.
* Preserves an order of data.
*/
class ConcatProcessor : public IProcessor
class ConcatProcessor final : public IProcessor
{
public:
ConcatProcessor(const Block & header, size_t num_inputs);

View File

@@ -8,7 +8,7 @@ namespace DB
/// Some ports are delayed. Delayed ports are processed after other outputs are all finished.
/// Data between ports is not mixed. It is important because this processor can be used before MergingSortedTransform.
/// Delayed ports appear after joins, when some non-matched data needs to be processed at the end.
class DelayedPortsProcessor : public IProcessor
class DelayedPortsProcessor final : public IProcessor
{
public:
DelayedPortsProcessor(const Block & header, size_t num_ports, const PortNumbers & delayed_ports, bool assert_main_ports_empty = false);

View File

@@ -15,7 +15,7 @@ namespace DB
* Doesn't do any heavy calculations.
* Preserves an order of data.
*/
class ForkProcessor : public IProcessor
class ForkProcessor final : public IProcessor
{
public:
ForkProcessor(const Block & header, size_t num_outputs)

View File

@@ -15,7 +15,7 @@ namespace DB
///
/// always_read_till_end - read all data from input ports even if limit was reached.
/// with_ties, description - implementation of LIMIT WITH TIES. It works only for single port.
class LimitTransform : public IProcessor
class LimitTransform final : public IProcessor
{
private:
UInt64 limit;

View File

@@ -10,7 +10,7 @@ namespace DB
/// Implementation for OFFSET N (without limit)
/// This processor support multiple inputs and outputs (the same number).
/// Each pair of input and output port works independently.
class OffsetTransform : public IProcessor
class OffsetTransform final : public IProcessor
{
private:
UInt64 offset;

View File

@@ -10,7 +10,7 @@ namespace DB
/** Reads all data into queue.
* After all data has been read - output it in the same order.
*/
class QueueBuffer : public IAccumulatingTransform
class QueueBuffer final : public IAccumulatingTransform
{
private:
std::queue<Chunk> chunks;

View File

@@ -18,7 +18,7 @@ namespace DB
* - union data from multiple inputs to single output - to serialize data that was processed in parallel.
* - split data from single input to multiple outputs - to allow further parallel processing.
*/
class ResizeProcessor : public IProcessor
class ResizeProcessor final : public IProcessor
{
public:
/// TODO Check that there is non zero number of inputs and outputs.