Merge branch 'master' into more-flexible-drop-cache

Kseniia Sumarokova 2023-07-05 11:43:57 +02:00 committed by GitHub
commit f2d942fbb5
212 changed files with 4615 additions and 2137 deletions


@ -121,8 +121,6 @@ jobs:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
SonarCloud:
# TODO: Remove if: whenever SonarCloud supports c++23
if: ${{ false }}
runs-on: [self-hosted, builder]
env:
SONAR_SCANNER_VERSION: 4.8.0.2856
@ -159,7 +157,7 @@ jobs:
- name: Set Up Build Tools
run: |
sudo apt-get update
sudo apt-get install -yq git cmake ccache ninja-build python3 yasm
sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
- name: Run build-wrapper
run: |
@ -178,4 +176,5 @@ jobs:
--define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
--define sonar.projectKey="ClickHouse_ClickHouse" \
--define sonar.organization="clickhouse-java" \
--define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" \
--define sonar.cfamily.cpp23.enabled=true \
--define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"


@ -16,18 +16,19 @@ curl https://clickhouse.com/ | sh
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlighting and navigation.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
* [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming Events
* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/294428050/) - Jul 27
* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Sep 12
Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.


@ -4,23 +4,22 @@
#include <cstring>
#include "types.h"
/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
namespace CityHash_v1_0_2 { struct uint128; }
constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
constexpr char hexDigitUppercase(unsigned char c)
namespace wide
{
return hex_digit_to_char_uppercase_table[c];
}
constexpr char hexDigitLowercase(unsigned char c)
{
return hex_digit_to_char_lowercase_table[c];
template <size_t Bits, typename Signed>
class integer;
}
/// Maps 0..255 to 00..FF or 00..ff correspondingly
namespace impl
{
/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
/// Maps 0..255 to 00..FF or 00..ff correspondingly.
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
"000102030405060708090A0B0C0D0E0F"
"101112131415161718191A1B1C1D1E1F"
"202122232425262728292A2B2C2D2E2F"
@ -38,7 +37,7 @@ constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
"E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
"F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
"000102030405060708090a0b0c0d0e0f"
"101112131415161718191a1b1c1d1e1f"
"202122232425262728292a2b2c2d2e2f"
@ -56,17 +55,8 @@ constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
inline void writeHexByteUppercase(UInt8 byte, void * out)
{
memcpy(out, &hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
}
inline void writeHexByteLowercase(UInt8 byte, void * out)
{
memcpy(out, &hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
}
constexpr inline std::string_view bin_byte_to_char_table = //
/// Maps 0..255 to 00000000..11111111 correspondingly.
constexpr inline std::string_view bin_byte_to_char_table = //
"0000000000000001000000100000001100000100000001010000011000000111"
"0000100000001001000010100000101100001100000011010000111000001111"
"0001000000010001000100100001001100010100000101010001011000010111"
@ -100,63 +90,8 @@ constexpr inline std::string_view bin_byte_to_char_table = //
"1111000011110001111100101111001111110100111101011111011011110111"
"1111100011111001111110101111101111111100111111011111111011111111";
inline void writeBinByte(UInt8 byte, void * out)
{
memcpy(out, &bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
}
/// Produces hex representation of an unsigned int with leading zeros (for checksums)
template <typename TUInt>
inline void writeHexUIntImpl(TUInt uint_, char * out, std::string_view table)
{
union
{
TUInt value;
UInt8 uint8[sizeof(TUInt)];
};
value = uint_;
for (size_t i = 0; i < sizeof(TUInt); ++i)
{
if constexpr (std::endian::native == std::endian::little)
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
else
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
}
}
template <typename TUInt>
inline void writeHexUIntUppercase(TUInt uint_, char * out)
{
writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
}
template <typename TUInt>
inline void writeHexUIntLowercase(TUInt uint_, char * out)
{
writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
}
template <typename TUInt>
std::string getHexUIntUppercase(TUInt uint_)
{
std::string res(sizeof(TUInt) * 2, '\0');
writeHexUIntUppercase(uint_, res.data());
return res;
}
template <typename TUInt>
std::string getHexUIntLowercase(TUInt uint_)
{
std::string res(sizeof(TUInt) * 2, '\0');
writeHexUIntLowercase(uint_, res.data());
return res;
}
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
constexpr inline std::string_view hex_char_to_digit_table
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
constexpr inline std::string_view hex_char_to_digit_table
= {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
@ -175,41 +110,182 @@ constexpr inline std::string_view hex_char_to_digit_table
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
256};
constexpr UInt8 unhex(char c)
{
return hex_char_to_digit_table[static_cast<UInt8>(c)];
}
constexpr UInt8 unhex2(const char * data)
{
return static_cast<UInt8>(unhex(data[0])) * 0x10 + static_cast<UInt8>(unhex(data[1]));
}
constexpr UInt16 unhex4(const char * data)
{
return static_cast<UInt16>(unhex(data[0])) * 0x1000 + static_cast<UInt16>(unhex(data[1])) * 0x100
+ static_cast<UInt16>(unhex(data[2])) * 0x10 + static_cast<UInt16>(unhex(data[3]));
}
template <typename TUInt>
constexpr TUInt unhexUInt(const char * data)
{
TUInt res = 0;
if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
/// Converts a hex digit '0'..'f' or '0'..'F' to its value 0..15.
constexpr UInt8 unhexDigit(char c)
{
return hex_char_to_digit_table[static_cast<UInt8>(c)];
}
/// Converts an unsigned integer in the native endian to hexadecimal representation and back. Used as a base class for HexConversion<T>.
template <typename TUInt, typename = void>
struct HexConversionUInt
{
static const constexpr size_t num_hex_digits = sizeof(TUInt) * 2;
static void hex(TUInt uint_, char * out, std::string_view table)
{
union
{
TUInt value;
UInt8 uint8[sizeof(TUInt)];
};
value = uint_;
for (size_t i = 0; i < sizeof(TUInt); ++i)
{
if constexpr (std::endian::native == std::endian::little)
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
else
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
}
}
static TUInt unhex(const char * data)
{
TUInt res;
if constexpr (sizeof(TUInt) == 1)
{
res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
}
else if constexpr (sizeof(TUInt) == 2)
{
res = static_cast<UInt16>(unhexDigit(data[0])) * 0x1000 + static_cast<UInt16>(unhexDigit(data[1])) * 0x100
+ static_cast<UInt16>(unhexDigit(data[2])) * 0x10 + static_cast<UInt16>(unhexDigit(data[3]));
}
else if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
{
res = 0;
for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
{
res <<= 4;
res += unhex(*data);
res += unhexDigit(*data);
}
}
else
{
res = 0;
for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
{
res <<= 64;
res += unhexUInt<UInt64>(data);
res += HexConversionUInt<UInt64>::unhex(data);
}
}
return res;
}
};
/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
template <typename T, typename SFINAE = void>
struct HexConversion;
template <typename TUInt>
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
template <size_t Bits, typename Signed>
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
{
static const constexpr size_t num_hex_digits = 32;
static void hex(const CityHashUInt128 & uint_, char * out, std::string_view table)
{
HexConversion<UInt64>::hex(uint_.high64, out, table);
HexConversion<UInt64>::hex(uint_.low64, out + 16, table);
}
static CityHashUInt128 unhex(const char * data)
{
CityHashUInt128 res;
res.high64 = HexConversion<UInt64>::unhex(data);
res.low64 = HexConversion<UInt64>::unhex(data + 16);
return res;
}
};
}
/// Produces a hexadecimal representation of an integer value with leading zeros (for checksums).
/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
/// It can be used with signed types as well, however they are written as corresponding unsigned numbers
/// using two's complement (i.e. for example "-1" is written as "0xFF", not as "-0x01").
template <typename T>
void writeHexUIntUppercase(const T & value, char * out)
{
impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_uppercase_table);
}
template <typename T>
void writeHexUIntLowercase(const T & value, char * out)
{
impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_lowercase_table);
}
template <typename T>
std::string getHexUIntUppercase(const T & value)
{
std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
writeHexUIntUppercase(value, res.data());
return res;
}
template <typename T>
std::string getHexUIntLowercase(const T & value)
{
std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
writeHexUIntLowercase(value, res.data());
return res;
}
constexpr char hexDigitUppercase(unsigned char c)
{
return impl::hex_digit_to_char_uppercase_table[c];
}
constexpr char hexDigitLowercase(unsigned char c)
{
return impl::hex_digit_to_char_lowercase_table[c];
}
inline void writeHexByteUppercase(UInt8 byte, void * out)
{
memcpy(out, &impl::hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
}
inline void writeHexByteLowercase(UInt8 byte, void * out)
{
memcpy(out, &impl::hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
}
/// Converts a hex representation with leading zeros back to an integer value.
/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
template <typename T>
constexpr T unhexUInt(const char * data)
{
return impl::HexConversion<T>::unhex(data);
}
/// Converts a hexadecimal digit '0'..'f' or '0'..'F' to UInt8.
constexpr UInt8 unhex(char c)
{
return impl::unhexDigit(c);
}
/// Converts two hexadecimal digits to UInt8.
constexpr UInt8 unhex2(const char * data)
{
return unhexUInt<UInt8>(data);
}
/// Converts four hexadecimal digits to UInt16.
constexpr UInt16 unhex4(const char * data)
{
return unhexUInt<UInt16>(data);
}
/// Produces a binary representation of a single byte.
inline void writeBinByte(UInt8 byte, void * out)
{
memcpy(out, &impl::bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
}
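For orientation, a minimal usage sketch of the reworked hex helpers above (assuming the header is included as `base/hex.h`; the include path is not shown in this diff). The writers emit fixed-width, zero-padded hex for any supported type, and `unhexUInt` reverses them through the same `impl::HexConversion` machinery:

```cpp
#include <cassert>
#include <string>

#include "base/hex.h"   // assumption: the header shown above lives at base/hex.h

int main()
{
    UInt32 value = 0xABCD;

    // sizeof(UInt32) * 2 = 8 hex digits, with leading zeros.
    std::string upper = getHexUIntUppercase(value);   // "0000ABCD"
    std::string lower = getHexUIntLowercase(value);   // "0000abcd"
    assert(upper == "0000ABCD" && lower == "0000abcd");

    // Round trip through impl::HexConversion<UInt32>.
    assert(unhexUInt<UInt32>(upper.data()) == value);

    // The single-digit and byte helpers keep their previous behaviour.
    assert(unhex('f') == 15);
    assert(unhex2("ff") == 0xFF);
}
```

As the comment in the header notes, signed types go through the same path: `getHexUIntUppercase(Int8{-1})` yields `"FF"`, the two's-complement byte.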


@ -87,7 +87,6 @@ set (SRCS
src/LoggingRegistry.cpp
src/LogStream.cpp
src/MD5Engine.cpp
src/MemoryPool.cpp
src/MemoryStream.cpp
src/Message.cpp
src/Mutex.cpp


@ -1,116 +0,0 @@
//
// MemoryPool.h
//
// Library: Foundation
// Package: Core
// Module: MemoryPool
//
// Definition of the MemoryPool class.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_MemoryPool_INCLUDED
#define Foundation_MemoryPool_INCLUDED
#include <cstddef>
#include <vector>
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
namespace Poco
{
class Foundation_API MemoryPool
/// A simple pool for fixed-size memory blocks.
///
/// The main purpose of this class is to speed-up
/// memory allocations, as well as to reduce memory
/// fragmentation in situations where the same blocks
/// are allocated all over again, such as in server
/// applications.
///
/// All allocated blocks are retained for future use.
/// A limit on the number of blocks can be specified.
/// Blocks can be preallocated.
{
public:
MemoryPool(std::size_t blockSize, int preAlloc = 0, int maxAlloc = 0);
/// Creates a MemoryPool for blocks with the given blockSize.
/// The number of blocks given in preAlloc are preallocated.
~MemoryPool();
void * get();
/// Returns a memory block. If there are no more blocks
/// in the pool, a new block will be allocated.
///
/// If maxAlloc blocks are already allocated, an
/// OutOfMemoryException is thrown.
void release(void * ptr);
/// Releases a memory block and returns it to the pool.
std::size_t blockSize() const;
/// Returns the block size.
int allocated() const;
/// Returns the number of allocated blocks.
int available() const;
/// Returns the number of available blocks in the pool.
private:
MemoryPool();
MemoryPool(const MemoryPool &);
MemoryPool & operator=(const MemoryPool &);
void clear();
enum
{
BLOCK_RESERVE = 128
};
typedef std::vector<char *> BlockVec;
std::size_t _blockSize;
int _maxAlloc;
int _allocated;
BlockVec _blocks;
FastMutex _mutex;
};
//
// inlines
//
inline std::size_t MemoryPool::blockSize() const
{
return _blockSize;
}
inline int MemoryPool::allocated() const
{
return _allocated;
}
inline int MemoryPool::available() const
{
return (int)_blocks.size();
}
} // namespace Poco
#endif // Foundation_MemoryPool_INCLUDED
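For context on what is being dropped, a minimal sketch of how the removed `Poco::MemoryPool` API was typically used (the block size and limits below are illustrative, not taken from this diff):

```cpp
#include "Poco/MemoryPool.h"

int main()
{
    // Fixed 1024-byte blocks, 4 preallocated, at most 16 allocated in total (illustrative numbers).
    Poco::MemoryPool pool(1024, 4, 16);

    void * block = pool.get();   // reuses a pooled block or allocates a new one
    // ... use block as a fixed-size scratch buffer ...
    pool.release(block);         // returned to the pool and retained for future get() calls
}
```

The HTTP stream classes touched later in this commit used exactly this mechanism through their class-level `operator new`/`operator delete` and a static `Poco::MemoryPool _pool`, which is what the remaining hunks remove in favour of plain heap allocation.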


@ -1,105 +0,0 @@
//
// MemoryPool.cpp
//
// Library: Foundation
// Package: Core
// Module: MemoryPool
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/MemoryPool.h"
#include "Poco/Exception.h"
namespace Poco {
MemoryPool::MemoryPool(std::size_t blockSize, int preAlloc, int maxAlloc):
_blockSize(blockSize),
_maxAlloc(maxAlloc),
_allocated(preAlloc)
{
poco_assert (maxAlloc == 0 || maxAlloc >= preAlloc);
poco_assert (preAlloc >= 0 && maxAlloc >= 0);
int r = BLOCK_RESERVE;
if (preAlloc > r)
r = preAlloc;
if (maxAlloc > 0 && maxAlloc < r)
r = maxAlloc;
_blocks.reserve(r);
try
{
for (int i = 0; i < preAlloc; ++i)
{
_blocks.push_back(new char[_blockSize]);
}
}
catch (...)
{
clear();
throw;
}
}
MemoryPool::~MemoryPool()
{
clear();
}
void MemoryPool::clear()
{
for (BlockVec::iterator it = _blocks.begin(); it != _blocks.end(); ++it)
{
delete [] *it;
}
_blocks.clear();
}
void* MemoryPool::get()
{
FastMutex::ScopedLock lock(_mutex);
if (_blocks.empty())
{
if (_maxAlloc == 0 || _allocated < _maxAlloc)
{
++_allocated;
return new char[_blockSize];
}
else throw OutOfMemoryException("MemoryPool exhausted");
}
else
{
char* ptr = _blocks.back();
_blocks.pop_back();
return ptr;
}
}
void MemoryPool::release(void* ptr)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_blocks.push_back(reinterpret_cast<char*>(ptr));
}
catch (...)
{
delete [] reinterpret_cast<char*>(ptr);
}
}
} // namespace Poco


@ -19,7 +19,6 @@
#include "Poco/BufferedStreamBuf.h"
#include "Poco/Net/HTTPBufferAllocator.h"
#include "Poco/Net/Net.h"
@ -27,9 +26,9 @@ namespace Poco
{
namespace Net
{
constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;
typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>, HTTPBufferAllocator> HTTPBasicStreamBuf;
typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>> HTTPBasicStreamBuf;
}


@ -1,53 +0,0 @@
//
// HTTPBufferAllocator.h
//
// Library: Net
// Package: HTTP
// Module: HTTPBufferAllocator
//
// Definition of the HTTPBufferAllocator class.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Net_HTTPBufferAllocator_INCLUDED
#define Net_HTTPBufferAllocator_INCLUDED
#include <ios>
#include "Poco/MemoryPool.h"
#include "Poco/Net/Net.h"
namespace Poco
{
namespace Net
{
class Net_API HTTPBufferAllocator
/// A BufferAllocator for HTTP streams.
{
public:
static char * allocate(std::streamsize size);
static void deallocate(char * ptr, std::streamsize size);
enum
{
BUFFER_SIZE = 128 * 1024
};
private:
static Poco::MemoryPool _pool;
};
}
} // namespace Poco::Net
#endif // Net_HTTPBufferAllocator_INCLUDED


@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"
@ -80,12 +79,6 @@ namespace Net
public:
HTTPChunkedInputStream(HTTPSession & session);
~HTTPChunkedInputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};
@ -95,12 +88,6 @@ namespace Net
public:
HTTPChunkedOutputStream(HTTPSession & session);
~HTTPChunkedOutputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};


@ -78,12 +78,6 @@ namespace Net
public:
HTTPFixedLengthInputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
~HTTPFixedLengthInputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};
@ -93,12 +87,6 @@ namespace Net
public:
HTTPFixedLengthOutputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
~HTTPFixedLengthOutputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};


@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"
@ -74,12 +73,6 @@ namespace Net
public:
HTTPHeaderInputStream(HTTPSession & session);
~HTTPHeaderInputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};
@ -89,12 +82,6 @@ namespace Net
public:
HTTPHeaderOutputStream(HTTPSession & session);
~HTTPHeaderOutputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};


@ -192,7 +192,7 @@ namespace Net
HTTPSession & operator=(const HTTPSession &);
StreamSocket _socket;
char * _pBuffer;
std::unique_ptr<char[]> _pBuffer;
char * _pCurrent;
char * _pEnd;
bool _keepAlive;


@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"
@ -75,12 +74,6 @@ namespace Net
public:
HTTPInputStream(HTTPSession & session);
~HTTPInputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};
@ -90,12 +83,6 @@ namespace Net
public:
HTTPOutputStream(HTTPSession & session);
~HTTPOutputStream();
void * operator new(std::size_t size);
void operator delete(void * ptr);
private:
static Poco::MemoryPool _pool;
};


@ -1,44 +0,0 @@
//
// HTTPBufferAllocator.cpp
//
// Library: Net
// Package: HTTP
// Module: HTTPBufferAllocator
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Net/HTTPBufferAllocator.h"
using Poco::MemoryPool;
namespace Poco {
namespace Net {
MemoryPool HTTPBufferAllocator::_pool(HTTPBufferAllocator::BUFFER_SIZE, 16);
char* HTTPBufferAllocator::allocate(std::streamsize size)
{
poco_assert_dbg (size == BUFFER_SIZE);
return reinterpret_cast<char*>(_pool.get());
}
void HTTPBufferAllocator::deallocate(char* ptr, std::streamsize size)
{
poco_assert_dbg (size == BUFFER_SIZE);
_pool.release(ptr);
}
} } // namespace Poco::Net


@ -34,7 +34,7 @@ namespace Net {
HTTPChunkedStreamBuf::HTTPChunkedStreamBuf(HTTPSession& session, openmode mode):
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
_session(session),
_mode(mode),
_chunk(0)
@ -181,10 +181,6 @@ HTTPChunkedStreamBuf* HTTPChunkedIOS::rdbuf()
// HTTPChunkedInputStream
//
Poco::MemoryPool HTTPChunkedInputStream::_pool(sizeof(HTTPChunkedInputStream));
HTTPChunkedInputStream::HTTPChunkedInputStream(HTTPSession& session):
HTTPChunkedIOS(session, std::ios::in),
std::istream(&_buf)
@ -196,34 +192,10 @@ HTTPChunkedInputStream::~HTTPChunkedInputStream()
{
}
void* HTTPChunkedInputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPChunkedInputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
//
// HTTPChunkedOutputStream
//
Poco::MemoryPool HTTPChunkedOutputStream::_pool(sizeof(HTTPChunkedOutputStream));
HTTPChunkedOutputStream::HTTPChunkedOutputStream(HTTPSession& session):
HTTPChunkedIOS(session, std::ios::out),
std::ostream(&_buf)
@ -235,24 +207,4 @@ HTTPChunkedOutputStream::~HTTPChunkedOutputStream()
{
}
void* HTTPChunkedOutputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPChunkedOutputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
} } // namespace Poco::Net


@ -30,7 +30,7 @@ namespace Net {
HTTPFixedLengthStreamBuf::HTTPFixedLengthStreamBuf(HTTPSession& session, ContentLength length, openmode mode):
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
_session(session),
_length(length),
_count(0)
@ -109,9 +109,6 @@ HTTPFixedLengthStreamBuf* HTTPFixedLengthIOS::rdbuf()
//
Poco::MemoryPool HTTPFixedLengthInputStream::_pool(sizeof(HTTPFixedLengthInputStream));
HTTPFixedLengthInputStream::HTTPFixedLengthInputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
HTTPFixedLengthIOS(session, length, std::ios::in),
std::istream(&_buf)
@ -124,33 +121,10 @@ HTTPFixedLengthInputStream::~HTTPFixedLengthInputStream()
}
void* HTTPFixedLengthInputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPFixedLengthInputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
//
// HTTPFixedLengthOutputStream
//
Poco::MemoryPool HTTPFixedLengthOutputStream::_pool(sizeof(HTTPFixedLengthOutputStream));
HTTPFixedLengthOutputStream::HTTPFixedLengthOutputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
HTTPFixedLengthIOS(session, length, std::ios::out),
std::ostream(&_buf)
@ -163,23 +137,4 @@ HTTPFixedLengthOutputStream::~HTTPFixedLengthOutputStream()
}
void* HTTPFixedLengthOutputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPFixedLengthOutputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
} } // namespace Poco::Net


@ -26,7 +26,7 @@ namespace Net {
HTTPHeaderStreamBuf::HTTPHeaderStreamBuf(HTTPSession& session, openmode mode):
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
_session(session),
_end(false)
{
@ -101,10 +101,6 @@ HTTPHeaderStreamBuf* HTTPHeaderIOS::rdbuf()
// HTTPHeaderInputStream
//
Poco::MemoryPool HTTPHeaderInputStream::_pool(sizeof(HTTPHeaderInputStream));
HTTPHeaderInputStream::HTTPHeaderInputStream(HTTPSession& session):
HTTPHeaderIOS(session, std::ios::in),
std::istream(&_buf)
@ -116,34 +112,10 @@ HTTPHeaderInputStream::~HTTPHeaderInputStream()
{
}
void* HTTPHeaderInputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPHeaderInputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
//
// HTTPHeaderOutputStream
//
Poco::MemoryPool HTTPHeaderOutputStream::_pool(sizeof(HTTPHeaderOutputStream));
HTTPHeaderOutputStream::HTTPHeaderOutputStream(HTTPSession& session):
HTTPHeaderIOS(session, std::ios::out),
std::ostream(&_buf)
@ -155,24 +127,4 @@ HTTPHeaderOutputStream::~HTTPHeaderOutputStream()
{
}
void* HTTPHeaderOutputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPHeaderOutputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
} } // namespace Poco::Net


@ -13,8 +13,8 @@
#include "Poco/Net/HTTPSession.h"
#include "Poco/Net/HTTPBufferAllocator.h"
#include "Poco/Net/NetException.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include <cstring>
@ -68,14 +68,6 @@ HTTPSession::HTTPSession(const StreamSocket& socket, bool keepAlive):
HTTPSession::~HTTPSession()
{
try
{
if (_pBuffer) HTTPBufferAllocator::deallocate(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
}
catch (...)
{
poco_unexpected();
}
try
{
close();
@ -177,10 +169,10 @@ void HTTPSession::refill()
{
if (!_pBuffer)
{
_pBuffer = HTTPBufferAllocator::allocate(HTTPBufferAllocator::BUFFER_SIZE);
_pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
}
_pCurrent = _pEnd = _pBuffer;
int n = receive(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
_pCurrent = _pEnd = _pBuffer.get();
int n = receive(_pBuffer.get(), HTTP_DEFAULT_BUFFER_SIZE);
_pEnd += n;
}
@ -199,7 +191,7 @@ void HTTPSession::connect(const SocketAddress& address)
_socket.setNoDelay(true);
// There may be leftover data from a previous (failed) request in the buffer,
// so we clear it.
_pCurrent = _pEnd = _pBuffer;
_pCurrent = _pEnd = _pBuffer.get();
}
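A minimal sketch of the buffer-ownership change applied to `HTTPSession` above, using a simplified, hypothetical stand-in class rather than the real Poco code: the raw `char *` that had to be handed back to `HTTPBufferAllocator` in the destructor becomes a `std::unique_ptr<char[]>` that releases itself.

```cpp
#include <cstddef>
#include <memory>

constexpr std::size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;

// Hypothetical, simplified stand-in for HTTPSession's buffer handling.
class SessionBuffer
{
public:
    void refill()
    {
        if (!_pBuffer)
            _pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
        _pCurrent = _pEnd = _pBuffer.get();
        // receive(_pBuffer.get(), HTTP_DEFAULT_BUFFER_SIZE) would advance _pEnd here.
    }

private:
    std::unique_ptr<char[]> _pBuffer;   // was: char * _pBuffer plus manual deallocation
    char * _pCurrent = nullptr;
    char * _pEnd = nullptr;
};
```

With the smart pointer, no custom cleanup is needed, which is why the `try { HTTPBufferAllocator::deallocate(...) } catch (...)` block disappears from `~HTTPSession()` earlier in this file.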


@ -26,7 +26,7 @@ namespace Net {
HTTPStreamBuf::HTTPStreamBuf(HTTPSession& session, openmode mode):
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
_session(session),
_mode(mode)
{
@ -96,10 +96,6 @@ HTTPStreamBuf* HTTPIOS::rdbuf()
// HTTPInputStream
//
Poco::MemoryPool HTTPInputStream::_pool(sizeof(HTTPInputStream));
HTTPInputStream::HTTPInputStream(HTTPSession& session):
HTTPIOS(session, std::ios::in),
std::istream(&_buf)
@ -112,33 +108,11 @@ HTTPInputStream::~HTTPInputStream()
}
void* HTTPInputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPInputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
//
// HTTPOutputStream
//
Poco::MemoryPool HTTPOutputStream::_pool(sizeof(HTTPOutputStream));
HTTPOutputStream::HTTPOutputStream(HTTPSession& session):
HTTPIOS(session, std::ios::out),
std::ostream(&_buf)
@ -150,24 +124,4 @@ HTTPOutputStream::~HTTPOutputStream()
{
}
void* HTTPOutputStream::operator new(std::size_t size)
{
return _pool.get();
}
void HTTPOutputStream::operator delete(void* ptr)
{
try
{
_pool.release(ptr);
}
catch (...)
{
poco_unexpected();
}
}
} } // namespace Poco::Net

contrib/libhdfs3 (vendored): 2 changed lines

@ -1 +1 @@
Subproject commit 164b89253fad7991bce77882f01b51ab81d19f3d
Subproject commit 377220ef351ae24994a5fcd2b5fa3930d00c4db0


@ -120,11 +120,12 @@
"docker/test/base": {
"name": "clickhouse/test-base",
"dependent": [
"docker/test/stateless",
"docker/test/integration/base",
"docker/test/fuzzer",
"docker/test/integration/base",
"docker/test/keeper-jepsen",
"docker/test/server-jepsen"
"docker/test/server-jepsen",
"docker/test/sqllogic",
"docker/test/stateless"
]
},
"docker/test/integration/kerberized_hadoop": {


@ -13,6 +13,7 @@ RUN apt-get update --yes \
sqlite3 \
unixodbc \
unixodbc-dev \
odbcinst \
sudo \
&& apt-get clean


@ -19,7 +19,7 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
source /usr/share/clickhouse-test/ci/attach_gdb.lib || true # FIXME: to not break old builds, clean on 2023-09-01
# shellcheck disable=SC1091
source /usr/share/clickhouse-test/ci/utils.lib
source /usr/share/clickhouse-test/ci/utils.lib || true # FIXME: to not break old builds, clean on 2023-09-01
# install test configs
/usr/share/clickhouse-test/config/install.sh
@ -93,6 +93,22 @@ sleep 5
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
function fn_exists() {
declare -F "$1" > /dev/null;
}
# FIXME: to not break old builds, clean on 2023-09-01
function try_run_with_retry() {
local total_retries="$1"
shift
if fn_exists run_with_retry; then
run_with_retry "$total_retries" "$@"
else
"$@"
fi
}
function run_tests()
{
set -x
@ -140,7 +156,7 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--report-logs-stats')
run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
set +e
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \


@ -18,7 +18,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
python3-pip \
shellcheck \
yamllint \
&& pip3 install black==23.1.0 boto3 codespell==2.2.1 dohq-artifactory mypy PyGithub unidiff pylint==2.6.2 \
&& pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
&& apt-get clean \
&& rm -rf /root/.cache/pip


@ -189,6 +189,7 @@ rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
-e "Authentication failed" \
-e "Cannot flush" \
-e "Container already exists" \
-e "doesn't have metadata version on disk" \
clickhouse-server.upgrade.log \
| grep -av -e "_repl_01111_.*Mapping for table with UUID" \
| zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \


@ -0,0 +1,29 @@
---
toc_priority:
toc_title:
---
# data_type_name {#data_type-name}
Description.
**Parameters** (Optional)
- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).
**Examples**
```sql
```
## Additional Info {#additional-info} (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)
[Original article](https://clickhouse.com/docs/en/data-types/<data-type-name>/) <!--hide-->


@ -0,0 +1,63 @@
# EngineName {#enginename}
- What the Database/Table engine does.
- Relations with other engines if they exist.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE ...
```
or
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE ...
```
**Engine Parameters**
**Query Clauses** (for Table engines only)
## Virtual columns {#virtual-columns} (for Table engines only)
List the virtual columns with descriptions, if they exist.
## Data Types Support {#data_types-support} (for Database engines only)
| EngineName | ClickHouse |
|-----------------------|------------------------------------|
| NativeDataTypeName | [ClickHouseDataTypeName](link#) |
## Specifics and recommendations {#specifics-and-recommendations}
Algorithms
Specifics of read and write processes
Examples of tasks
Recommendations for usage
Specifics of data storage
## Usage Example {#usage-example}
The example must show usage and use cases. The following text contains the recommended parts of this section.
Input table:
``` text
```
Query:
``` sql
```
Result:
``` text
```
Follow up with any text to clarify the example.
**See Also**
- [link](#)


@ -0,0 +1,51 @@
## functionName {#functionname-in-lower-case}
Short description.
**Syntax** (without SELECT)
``` sql
<function syntax>
```
Alias: `<alias name>`. (Optional)
More text (Optional).
**Arguments** (Optional)
- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Parameters** (Optional, only for parametric aggregate functions)
- `z` — Description. Optional (only for optional parameters). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Returned value(s)**
- Returned values list.
Type: [Type name](relative/path/to/type/dscr.md#type).
**Example**
The example must show usage and/or use cases. The following text contains recommended parts of an example.
Input table (Optional):
``` text
```
Query:
``` sql
```
Result:
``` text
```
**See Also** (Optional)
- [link](#)


@ -0,0 +1,33 @@
## server_setting_name {#server_setting_name}
Description.
Describe what is configured in this section of settings.
Possible value: ...
Default value: ...
**Settings** (Optional)
If the section contains several settings, list them here. Specify possible values and default values:
- setting_1 — Description.
- setting_2 — Description.
**Example**
```xml
<server_setting_name>
<setting_1> ... </setting_1>
<setting_2> ... </setting_2>
</server_setting_name>
```
**Additional Info** (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@ -0,0 +1,27 @@
## setting_name {#setting_name}
Description.
For the switch setting, use the typical phrase: “Enables or disables something …”.
Possible values:
*For a switch setting:*
- 0 — Disabled.
- 1 — Enabled.
*For another setting (typical phrases):*
- Positive integer.
- 0 — Disabled or unlimited or something else.
Default value: `value`.
**Additional Info** (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@ -0,0 +1,24 @@
# Statement name (for example, SHOW USER) {#statement-name-in-lower-case}
Brief description of what the statement does.
**Syntax**
```sql
Syntax of the statement.
```
## Other necessary sections of the description (Optional) {#anchor}
Examples of descriptions with a complicated structure:
- https://clickhouse.com/docs/en/sql-reference/statements/grant/
- https://clickhouse.com/docs/en/sql-reference/statements/revoke/
- https://clickhouse.com/docs/en/sql-reference/statements/select/join/
**See Also** (Optional)
Links to related topics as a list.
- [link](#)


@ -0,0 +1,25 @@
# system.table_name {#system-tables_table-name}
Description.
Columns:
- `column_name` ([data_type_name](path/to/data_type.md)) — Description.
**Example**
Query:
``` sql
SELECT * FROM system.table_name
```
Result:
``` text
Some output. It shouldn't be too long.
```
**See Also**
- [Article name](path/to/article_name.md) — Some words about referenced information.


@ -378,6 +378,10 @@ request](https://github.com/ClickHouse/ClickHouse/commits/master) and find CI ch
https://s3.amazonaws.com/clickhouse/builds/PRs/.../.../binary_aarch64_v80compat/clickhouse". You can then click the link to download the
build.
### macOS-only: Install with Homebrew
To install ClickHouse using the popular `brew` package manager, follow the instructions listed in the [ClickHouse Homebrew tap](https://github.com/ClickHouse/homebrew-clickhouse).
## Launch {#launch}
To start the server as a daemon, run:


@ -13,6 +13,7 @@ System tables provide information about:
- Server states, processes, and environment.
- Server's internal processes.
- Options used when the ClickHouse binary was built.
System tables:


@ -283,7 +283,7 @@ The optional keyword `EXTENDED` currently has no effect, it only exists for MySQ
`SHOW INDEX` produces a result table with the following structure:
- table - The name of the table (String)
- non_unique - 0 if the index can contain duplicates, 1 otherwise (UInt8)
- non_unique - 0 if the index cannot contain duplicates, 1 otherwise (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
- seq_in_index - Currently unused
- column_name - Currently unused


@ -790,7 +790,7 @@ bool Client::processWithFuzzing(const String & full_query)
WriteBufferFromOStream cerr_buf(std::cerr, 4096);
fuzz_base->dumpTree(cerr_buf);
cerr_buf.next();
cerr_buf.finalize();
fmt::print(
stderr,
@ -928,7 +928,7 @@ bool Client::processWithFuzzing(const String & full_query)
std::cout << std::endl;
WriteBufferFromOStream ast_buf(std::cout, 4096);
formatAST(*query, ast_buf, false /*highlight*/);
ast_buf.next();
ast_buf.finalize();
if (const auto * insert = query->as<ASTInsertQuery>())
{
/// For inserts with data it's really useful to have the data itself available in the logs, as formatAST doesn't print it


@ -151,6 +151,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
WriteBufferFromFileDescriptor out(STDOUT_FILENO);
obfuscateQueries(query, out, obfuscated_words_map, used_nouns, hash_func, is_known_identifier);
out.finalize();
}
else
{
@ -175,7 +176,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
{
WriteBufferFromOStream res_buf(std::cout, 4096);
formatAST(*res, res_buf, hilite, oneline);
res_buf.next();
res_buf.finalize();
if (multiple)
std::cout << "\n;\n";
std::cout << std::endl;
@ -199,7 +200,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
res_cout.write(*s_pos++);
}
res_cout.next();
res_cout.finalize();
if (multiple)
std::cout << " \\\n;\n";
std::cout << std::endl;
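The client and `clickhouse-format` hunks above follow one pattern repeated throughout this commit: a `WriteBuffer` is explicitly `finalize()`d once writing is done, instead of ending with a bare `next()` call. A minimal sketch of that usage, assuming ClickHouse's `WriteBufferFromOStream` as in the hunks above:

```cpp
#include <iostream>

#include <IO/WriteBufferFromOStream.h>

void dumpGreeting()
{
    DB::WriteBufferFromOStream out(std::cout, 4096);
    const char message[] = "formatted output\n";
    out.write(message, sizeof(message) - 1);
    out.finalize();   // flush and mark the buffer as finished; replaces the previous bare next()
}
```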


@ -9,6 +9,7 @@
#include <Poco/AutoPtr.h>
#include <Poco/Logger.h>
#include <Common/logger_useful.h>
#include <Disks/DiskLocal.h>
int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
@ -39,8 +40,9 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
try
{
auto keeper_context = std::make_shared<KeeperContext>();
keeper_context->digest_enabled = true;
auto keeper_context = std::make_shared<KeeperContext>(true);
keeper_context->setDigestEnabled(true);
keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>(), 0));
DB::KeeperStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
@ -51,10 +53,10 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
DB::SnapshotMetadataPtr snapshot_meta = std::make_shared<DB::SnapshotMetadata>(storage.getZXID(), 1, std::make_shared<nuraft::cluster_config>());
DB::KeeperStorageSnapshot snapshot(&storage, snapshot_meta);
DB::KeeperSnapshotManager manager(options["output-dir"].as<std::string>(), 1, keeper_context);
DB::KeeperSnapshotManager manager(1, keeper_context);
auto snp = manager.serializeSnapshotToBuffer(snapshot);
auto path = manager.serializeSnapshotBufferToDisk(*snp, storage.getZXID());
std::cout << "Snapshot serialized to path:" << path << std::endl;
auto file_info = manager.serializeSnapshotBufferToDisk(*snp, storage.getZXID());
std::cout << "Snapshot serialized to path:" << fs::path(file_info.disk->getPath()) / file_info.path << std::endl;
}
catch (...)
{


@ -48,10 +48,10 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManager.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManagerS3.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateMachine.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperContext.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperAsynchronousMetrics.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/TinyContext.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/pathUtils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SessionExpiryQueue.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SummingStateMachine.cpp
@ -60,10 +60,14 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/SettingsFields.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/BaseSettings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/ServerSettings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/Field.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/SettingsEnums.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/ServerUUID.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/UUID.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Core/BackgroundSchedulePool.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/IO/ReadBuffer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperTCPHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/TCPServer.cpp
@ -95,6 +99,10 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/ICompressionCodec.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/LZ4_decompress_faster.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/CurrentThread.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollections.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollectionConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/IKeeper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/TestKeeper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/ZooKeeperCommon.cpp
@ -105,11 +113,58 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/ZooKeeperLock.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/ZooKeeperNodeCache.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/registerDisks.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IDisk.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskFactory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskSelector.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskLocal.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskLocalCheckThread.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/LocalDirectorySyncGuard.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/TemporaryFileOnDisk.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/loadLocalDiskConfig.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/IObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataFromDiskTransactionState.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/ObjectStorageIterator.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/StoredObject.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/registerDiskS3.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3Capabilities.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/diskSettings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/createReadBufferFromFileBase.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/IOUringReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/WriteBufferFromTemporaryFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/WriteBufferWithFinalizeCallback.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/AsynchronousBoundedReadBuffer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/getThreadPoolReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ThreadPoolRemoteFSReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ThreadPoolReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Storages/StorageS3Settings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/BaseDaemon.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/SentryWriter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/GraphiteWriter.cpp
${CMAKE_CURRENT_BINARY_DIR}/../../src/Daemon/GitHash.generated.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/Standalone/Context.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/Standalone/Settings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/Standalone/ThreadStatusExt.cpp
Keeper.cpp
clickhouse-keeper.cpp
)
@ -132,10 +187,6 @@ if (BUILD_STANDALONE_KEEPER)
target_compile_definitions (clickhouse-keeper PRIVATE -DCLICKHOUSE_PROGRAM_STANDALONE_BUILD)
target_compile_definitions (clickhouse-keeper PUBLIC -DWITHOUT_TEXT_LOG)
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../../src") # uses includes from src directory
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src/Core/include") # uses some includes from core
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src") # uses some includes from common
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT AND TARGET ch_rust::skim)
target_link_libraries(clickhouse-keeper PRIVATE ch_rust::skim)
endif()


@ -24,6 +24,8 @@
#include <sys/stat.h>
#include <pwd.h>
#include <Interpreters/Context.h>
#include <Coordination/FourLetterCommand.h>
#include <Coordination/KeeperAsynchronousMetrics.h>
@ -45,6 +47,8 @@
#include <Server/ProtocolServerAdapter.h>
#include <Server/KeeperTCPHandlerFactory.h>
#include <Disks/registerDisks.h>
int mainEntryClickHouseKeeper(int argc, char ** argv)
{
@ -201,9 +205,12 @@ void Keeper::defineOptions(Poco::Util::OptionSet & options)
BaseDaemon::defineOptions(options);
}
struct Keeper::KeeperHTTPContext : public IHTTPContext
namespace
{
explicit KeeperHTTPContext(TinyContextPtr context_)
struct KeeperHTTPContext : public IHTTPContext
{
explicit KeeperHTTPContext(ContextPtr context_)
: context(std::move(context_))
{}
@ -247,12 +254,14 @@ struct Keeper::KeeperHTTPContext : public IHTTPContext
return {context->getConfigRef().getInt64("keeper_server.http_send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0};
}
TinyContextPtr context;
ContextPtr context;
};
HTTPContextPtr Keeper::httpContext()
HTTPContextPtr httpContext()
{
return std::make_shared<KeeperHTTPContext>(tiny_context);
return std::make_shared<KeeperHTTPContext>(Context::getGlobalContextInstance());
}
}
int Keeper::main(const std::vector<std::string> & /*args*/)
@ -316,10 +325,21 @@ try
std::mutex servers_lock;
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
tiny_context = std::make_shared<TinyContext>();
auto shared_context = Context::createShared();
auto global_context = Context::createGlobal(shared_context.get());
global_context->makeGlobalContext();
global_context->setPath(path);
global_context->setRemoteHostFilter(config());
if (config().has("macros"))
global_context->setMacros(std::make_unique<Macros>(config(), "macros", log));
registerDisks(/*global_skip_access_check=*/false);
/// This object will periodically calculate some metrics.
KeeperAsynchronousMetrics async_metrics(
tiny_context,
global_context,
config().getUInt("asynchronous_metrics_update_period_s", 1),
[&]() -> std::vector<ProtocolServerMetrics>
{
@ -344,12 +364,12 @@ try
}
/// Initialize keeper RAFT. Do nothing if no keeper_server in config.
tiny_context->initializeKeeperDispatcher(/* start_async = */ true);
FourLetterCommandFactory::registerCommands(*tiny_context->getKeeperDispatcher());
global_context->initializeKeeperDispatcher(/* start_async = */ true);
FourLetterCommandFactory::registerCommands(*global_context->getKeeperDispatcher());
auto config_getter = [this] () -> const Poco::Util::AbstractConfiguration &
auto config_getter = [&] () -> const Poco::Util::AbstractConfiguration &
{
return tiny_context->getConfigRef();
return global_context->getConfigRef();
};
auto tcp_receive_timeout = config().getInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC);
@ -371,7 +391,7 @@ try
"Keeper (tcp): " + address.toString(),
std::make_unique<TCPServer>(
new KeeperTCPHandlerFactory(
config_getter, tiny_context->getKeeperDispatcher(),
config_getter, global_context->getKeeperDispatcher(),
tcp_receive_timeout, tcp_send_timeout, false), server_pool, socket));
});
@ -389,7 +409,7 @@ try
"Keeper with secure protocol (tcp_secure): " + address.toString(),
std::make_unique<TCPServer>(
new KeeperTCPHandlerFactory(
config_getter, tiny_context->getKeeperDispatcher(),
config_getter, global_context->getKeeperDispatcher(),
tcp_receive_timeout, tcp_send_timeout, true), server_pool, socket));
#else
UNUSED(port);
@ -441,7 +461,7 @@ try
[&](ConfigurationPtr config, bool /* initial_loading */)
{
if (config->has("keeper_server"))
tiny_context->updateKeeperConfiguration(*config);
global_context->updateKeeperConfiguration(*config);
},
/* already_loaded = */ false); /// Reload it right now (initial loading)
@ -472,7 +492,7 @@ try
else
LOG_INFO(log, "Closed connections to Keeper.");
tiny_context->shutdownKeeperDispatcher();
global_context->shutdownKeeperDispatcher();
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
server_pool.joinAll();


@ -1,9 +1,7 @@
#pragma once
#include <Server/IServer.h>
#include <Server/HTTP/HTTPContext.h>
#include <Daemon/BaseDaemon.h>
#include <Coordination/TinyContext.h>
namespace Poco
{
@ -68,11 +66,6 @@ protected:
std::string getDefaultConfigFileName() const override;
private:
TinyContextPtr tiny_context;
struct KeeperHTTPContext;
HTTPContextPtr httpContext();
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
using CreateServerFunc = std::function<void(UInt16)>;

rust/skim/Cargo.lock (generated): 360 changed lines

@ -14,13 +14,19 @@ dependencies = [
[[package]]
name = "aho-corasick"
version = "0.7.20"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
"memchr",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
@ -32,9 +38,9 @@ dependencies = [
[[package]]
name = "arrayvec"
version = "0.7.2"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
[[package]]
name = "atty"
@ -42,7 +48,7 @@ version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"hermit-abi 0.1.19",
"libc",
"winapi",
]
@ -67,15 +73,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bumpalo"
version = "3.11.1"
version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "cc"
version = "1.0.77"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
[[package]]
name = "cfg-if"
@ -85,13 +91,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.23"
version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-integer",
"num-traits",
"time 0.1.45",
"wasm-bindgen",
@ -100,9 +106,9 @@ dependencies = [
[[package]]
name = "clap"
version = "3.2.23"
version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
dependencies = [
"atty",
"bitflags",
@ -135,9 +141,9 @@ dependencies = [
[[package]]
name = "core-foundation-sys"
version = "0.8.3"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
[[package]]
name = "crossbeam"
@ -155,9 +161,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
version = "0.5.6"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
dependencies = [
"cfg-if",
"crossbeam-utils",
@ -165,9 +171,9 @@ dependencies = [
[[package]]
name = "crossbeam-deque"
version = "0.8.2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
dependencies = [
"cfg-if",
"crossbeam-epoch",
@ -176,14 +182,14 @@ dependencies = [
[[package]]
name = "crossbeam-epoch"
version = "0.9.13"
version = "0.9.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset 0.7.1",
"memoffset 0.9.0",
"scopeguard",
]
@ -199,18 +205,18 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
version = "0.8.14"
version = "0.8.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
dependencies = [
"cfg-if",
]
[[package]]
name = "cxx"
version = "1.0.83"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf"
checksum = "e88abab2f5abbe4c56e8f1fb431b784d710b709888f35755a160e62e33fe38e8"
dependencies = [
"cc",
"cxxbridge-flags",
@ -220,9 +226,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.83"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39"
checksum = "5c0c11acd0e63bae27dcd2afced407063312771212b7a823b4fd72d633be30fb"
dependencies = [
"cc",
"codespan-reporting",
@ -230,31 +236,31 @@ dependencies = [
"proc-macro2",
"quote",
"scratch",
"syn",
"syn 2.0.23",
]
[[package]]
name = "cxxbridge-flags"
version = "1.0.83"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12"
checksum = "8d3816ed957c008ccd4728485511e3d9aaf7db419aa321e3d2c5a2f3411e36c8"
[[package]]
name = "cxxbridge-macro"
version = "1.0.83"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6"
checksum = "a26acccf6f445af85ea056362561a24ef56cdc15fcc685f03aec50b9c702cb6d"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.23",
]
[[package]]
name = "darling"
version = "0.14.2"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
"darling_core",
"darling_macro",
@ -262,27 +268,27 @@ dependencies = [
[[package]]
name = "darling_core"
version = "0.14.2"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
"syn 1.0.109",
]
[[package]]
name = "darling_macro"
version = "0.14.2"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
"darling_core",
"quote",
"syn",
"syn 1.0.109",
]
[[package]]
@ -313,7 +319,7 @@ dependencies = [
"darling",
"proc-macro2",
"quote",
"syn",
"syn 1.0.109",
]
[[package]]
@ -323,7 +329,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68"
dependencies = [
"derive_builder_core",
"syn",
"syn 1.0.109",
]
[[package]]
@ -349,9 +355,9 @@ dependencies = [
[[package]]
name = "either"
version = "1.8.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
[[package]]
name = "env_logger"
@ -383,9 +389,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
"cfg-if",
"libc",
@ -407,6 +413,12 @@ dependencies = [
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]]
name = "humantime"
version = "2.1.0"
@ -415,26 +427,25 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "iana-time-zone"
version = "0.1.53"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"wasm-bindgen",
"winapi",
"windows",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.1"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cxx",
"cxx-build",
"cc",
]
[[package]]
@ -445,9 +456,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "1.9.2"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown",
@ -455,9 +466,9 @@ dependencies = [
[[package]]
name = "js-sys"
version = "0.3.60"
version = "0.3.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
dependencies = [
"wasm-bindgen",
]
@ -470,27 +481,24 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.138"
version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
[[package]]
name = "link-cplusplus"
version = "1.0.7"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
dependencies = [
"cc",
]
[[package]]
name = "log"
version = "0.4.17"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
[[package]]
name = "memchr"
@ -509,9 +517,9 @@ dependencies = [
[[package]]
name = "memoffset"
version = "0.7.1"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
"autocfg",
]
@ -541,16 +549,6 @@ dependencies = [
"pin-utils",
]
[[package]]
name = "num-integer"
version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.15"
@ -562,25 +560,25 @@ dependencies = [
[[package]]
name = "num_cpus"
version = "1.14.0"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
"hermit-abi",
"hermit-abi 0.3.1",
"libc",
]
[[package]]
name = "once_cell"
version = "1.16.0"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "os_str_bytes"
version = "6.4.1"
version = "6.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
[[package]]
name = "pin-utils"
@ -590,27 +588,27 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "proc-macro2"
version = "1.0.47"
version = "1.0.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.21"
version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rayon"
version = "1.6.1"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
dependencies = [
"either",
"rayon-core",
@ -618,9 +616,9 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.10.1"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
@ -650,9 +648,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.7.0"
version = "1.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
dependencies = [
"aho-corasick",
"memchr",
@ -661,15 +659,15 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.6.28"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
[[package]]
name = "rustversion"
version = "1.0.9"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06"
[[package]]
name = "scopeguard"
@ -679,15 +677,15 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "scratch"
version = "1.0.2"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
[[package]]
name = "serde"
version = "1.0.149"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "256b9932320c590e707b94576e3cc1f7c9024d0ee6612dfbcf1cb106cbe8e055"
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
[[package]]
name = "shlex"
@ -697,9 +695,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
[[package]]
name = "skim"
version = "0.10.2"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cebed5f897cd6c0d80fbe30adb36c0abf7400e93043a63ae56458495642b3485"
checksum = "e5d28de0a6cb2cdd83a076f1de9d965b973ae08b244df1aa70b432946dda0f32"
dependencies = [
"atty",
"beef",
@ -717,7 +715,7 @@ dependencies = [
"rayon",
"regex",
"shlex",
"time 0.3.17",
"time 0.3.22",
"timer",
"tuikit",
"unicode-width",
@ -732,9 +730,20 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "1.0.105"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737"
dependencies = [
"proc-macro2",
"quote",
@ -754,9 +763,9 @@ dependencies = [
[[package]]
name = "termcolor"
version = "1.1.3"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
dependencies = [
"winapi-util",
]
@ -769,30 +778,31 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
version = "1.0.37"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e"
checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.37"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb"
checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.23",
]
[[package]]
name = "thread_local"
version = "1.1.4"
version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
"cfg-if",
"once_cell",
]
@ -809,9 +819,9 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.17"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd"
dependencies = [
"serde",
"time-core",
@ -819,9 +829,9 @@ dependencies = [
[[package]]
name = "time-core"
version = "0.1.0"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
[[package]]
name = "timer"
@ -848,9 +858,9 @@ dependencies = [
[[package]]
name = "unicode-ident"
version = "1.0.5"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
[[package]]
name = "unicode-width"
@ -860,15 +870,15 @@ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
name = "utf8parse"
version = "0.2.0"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "vte"
version = "0.11.0"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aae21c12ad2ec2d168c236f369c38ff332bc1134f7246350dca641437365045"
checksum = "f5022b5fbf9407086c180e9557be968742d839e68346af7792b8592489732197"
dependencies = [
"arrayvec",
"utf8parse",
@ -899,9 +909,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.83"
version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@ -909,24 +919,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.83"
version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn",
"syn 2.0.23",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.83"
version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@ -934,22 +944,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.83"
version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.23",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.83"
version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
[[package]]
name = "winapi"
@ -981,3 +991,69 @@ name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.48.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
[[package]]
name = "windows_i686_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
[[package]]
name = "windows_i686_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"

View File

@ -228,6 +228,12 @@ ContextAccess::ContextAccess(const AccessControl & access_control_, const Params
}
ContextAccess::ContextAccess(FullAccess)
: is_full_access(true), access(std::make_shared<AccessRights>(AccessRights::getFullAccess())), access_with_implicit(access)
{
}
ContextAccess::~ContextAccess()
{
enabled_settings.reset();
@ -413,14 +419,8 @@ std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
{
static const std::shared_ptr<const ContextAccess> res = []
{
auto full_access = std::make_shared<ContextAccess>();
full_access->is_full_access = true;
full_access->access = std::make_shared<AccessRights>(AccessRights::getFullAccess());
full_access->access_with_implicit = full_access->access;
return full_access;
}();
static const std::shared_ptr<const ContextAccess> res =
[] { return std::shared_ptr<ContextAccess>(new ContextAccess{kFullAccess}); }();
return res;
}
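
The rewritten getFullAccess above leans on a standard C++ idiom rather than anything ClickHouse-specific: a function-local static initialized by an immediately-invoked lambda. A minimal sketch of that idiom, with a hypothetical Config type standing in for ContextAccess:

```cpp
#include <memory>

struct Config { int verbosity = 0; };   // hypothetical stand-in type

const std::shared_ptr<const Config> & getDefaultConfig()
{
    // The lambda runs exactly once; since C++11 the initialization of a
    // function-local static is guaranteed to be thread-safe.
    static const std::shared_ptr<const Config> instance = []
    {
        auto cfg = std::make_shared<Config>();
        cfg->verbosity = 1;              // one-time setup goes here
        return std::shared_ptr<const Config>(std::move(cfg));
    }();
    return instance;
}
```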

View File

@ -69,7 +69,6 @@ public:
using Params = ContextAccessParams;
const Params & getParams() const { return params; }
ContextAccess() { } /// NOLINT
ContextAccess(const AccessControl & access_control_, const Params & params_);
/// Returns the current user. Throws if user is nullptr.
@ -171,10 +170,17 @@ public:
private:
friend class AccessControl;
struct FullAccess {};
static const FullAccess kFullAccess;
/// Makes an instance of ContextAccess which provides full access to everything
/// without any limitations. This is used for the global context.
explicit ContextAccess(FullAccess);
void initialize();
void setUser(const UserPtr & user_) const;
void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const;
void calculateAccessRights() const;
void setUser(const UserPtr & user_) const TSA_REQUIRES(mutex);
void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const TSA_REQUIRES(mutex);
void calculateAccessRights() const TSA_REQUIRES(mutex);
template <bool throw_if_denied, bool grant_option>
bool checkAccessImpl(const AccessFlags & flags) const;
@ -217,20 +223,23 @@ private:
const AccessControl * access_control = nullptr;
const Params params;
bool is_full_access = false;
mutable Poco::Logger * trace_log = nullptr;
mutable UserPtr user;
mutable String user_name;
mutable bool user_was_dropped = false;
mutable scope_guard subscription_for_user_change;
mutable std::shared_ptr<const EnabledRoles> enabled_roles;
mutable scope_guard subscription_for_roles_changes;
mutable std::shared_ptr<const EnabledRolesInfo> roles_info;
mutable std::shared_ptr<const AccessRights> access;
mutable std::shared_ptr<const AccessRights> access_with_implicit;
mutable std::shared_ptr<const EnabledRowPolicies> enabled_row_policies;
mutable std::shared_ptr<const EnabledQuota> enabled_quota;
mutable std::shared_ptr<const EnabledSettings> enabled_settings;
const bool is_full_access = false;
mutable std::atomic<bool> user_was_dropped = false;
mutable std::atomic<Poco::Logger *> trace_log = nullptr;
mutable UserPtr user TSA_GUARDED_BY(mutex);
mutable String user_name TSA_GUARDED_BY(mutex);
mutable scope_guard subscription_for_user_change TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const EnabledRoles> enabled_roles TSA_GUARDED_BY(mutex);
mutable scope_guard subscription_for_roles_changes TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const EnabledRolesInfo> roles_info TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const AccessRights> access TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const AccessRights> access_with_implicit TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const EnabledRowPolicies> enabled_row_policies TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const EnabledQuota> enabled_quota TSA_GUARDED_BY(mutex);
mutable std::shared_ptr<const EnabledSettings> enabled_settings TSA_GUARDED_BY(mutex);
mutable std::mutex mutex;
};
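
The TSA_GUARDED_BY / TSA_REQUIRES macros added above are assumed to expand to Clang's thread-safety-analysis attributes (guarded_by, requires_capability). The following is only a sketch of how such annotations are typically wired up and checked with -Wthread-safety, not ClickHouse's actual macro definitions:

```cpp
#include <mutex>

// Assumed mapping to Clang's analysis attributes; other compilers see empty macros.
#if defined(__clang__)
#    define TSA_GUARDED_BY(x) __attribute__((guarded_by(x)))
#    define TSA_REQUIRES(x) __attribute__((requires_capability(x)))
#else
#    define TSA_GUARDED_BY(x)
#    define TSA_REQUIRES(x)
#endif

class Counter
{
public:
    void increment()
    {
        std::lock_guard lock(mutex);    // the analysis sees the capability acquired here
        incrementLocked();
    }

private:
    /// May only be called while `mutex` is held; an unlocked call becomes a compile-time
    /// warning with Clang, -Wthread-safety and an annotated standard library (e.g. libc++).
    void incrementLocked() TSA_REQUIRES(mutex) { ++value; }

    std::mutex mutex;
    int value TSA_GUARDED_BY(mutex) = 0;    // reads and writes must happen under `mutex`
};
```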

View File

@ -239,6 +239,7 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadKeys(const Block & requ
WriteBufferFromOStream out_buffer(os);
auto output_format = getContext()->getOutputFormat(ExternalDictionaryLibraryBridgeHelper::DEFAULT_FORMAT, out_buffer, requested_block.cloneEmpty());
formatBlock(output_format, requested_block);
out_buffer.finalize();
};
return QueryPipeline(loadBase(uri, out_stream_callback));
}

View File

@ -362,7 +362,7 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu
std::cout << std::endl;
WriteBufferFromOStream res_buf(std::cout, 4096);
formatAST(*res, res_buf);
res_buf.next();
res_buf.finalize();
std::cout << std::endl << std::endl;
}

View File

@ -1244,7 +1244,7 @@ void QueryFuzzer::fuzzMain(ASTPtr & ast)
std::cout << std::endl;
WriteBufferFromOStream ast_buf(std::cout, 4096);
formatAST(*ast, ast_buf, false /*highlight*/);
ast_buf.next();
ast_buf.finalize();
std::cout << std::endl << std::endl;
}
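
The hunks above replace res_buf.next() with res_buf.finalize() (and add an explicit finalize() in the bridge helper). A generic, hedged illustration of why an explicit finalize step matters, using plain <cstdio> rather than ClickHouse's WriteBuffer classes: a destructor must not throw, so an error on the last flush can only surface if the caller finalizes explicitly.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

class FileWriter
{
public:
    explicit FileWriter(const std::string & path) : f(std::fopen(path.c_str(), "w"))
    {
        if (!f)
            throw std::runtime_error("cannot open " + path);
    }

    void write(const std::string & s)
    {
        if (std::fputs(s.c_str(), f) == EOF)
            throw std::runtime_error("write failed");
    }

    /// Callers invoke this explicitly so that flush errors surface as exceptions.
    void finalize()
    {
        if (finalized)
            return;
        finalized = true;
        if (std::fflush(f) != 0)
            throw std::runtime_error("flush failed");
    }

    ~FileWriter()
    {
        try { finalize(); } catch (...) { /* destructors must not throw: the error is lost */ }
        std::fclose(f);
    }

private:
    std::FILE * f = nullptr;
    bool finalized = false;
};
```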

View File

@ -188,6 +188,7 @@
M(CacheDetachedFileSegments, "Number of existing detached cache file segments") \
M(FilesystemCacheSize, "Filesystem cache size in bytes") \
M(FilesystemCacheElements, "Filesystem cache elements (file segments)") \
M(FilesystemCacheDownloadQueueElements, "Filesystem cache elements in download queue") \
M(AsyncInsertCacheSize, "Number of async insert hash id in cache") \
M(S3Requests, "S3 requests") \
M(KeeperAliveConnections, "Number of alive connections") \

View File

@ -67,8 +67,8 @@ ThreadGroup::ThreadGroup()
: master_thread_id(CurrentThread::get().thread_id)
{}
ThreadStatus::ThreadStatus()
: thread_id{getThreadId()}
ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
: thread_id{getThreadId()}, check_current_thread_on_destruction(check_current_thread_on_destruction_)
{
last_rusage = std::make_unique<RUsageCounters>();
@ -201,8 +201,11 @@ ThreadStatus::~ThreadStatus()
/// Only change current_thread if it's currently being used by this ThreadStatus
/// For example, PushingToViews chain creates and deletes ThreadStatus instances while running in the main query thread
if (current_thread == this)
if (check_current_thread_on_destruction)
{
assert(current_thread == this);
current_thread = nullptr;
}
}
void ThreadStatus::updatePerformanceCounters()

View File

@ -224,8 +224,10 @@ private:
Poco::Logger * log = nullptr;
bool check_current_thread_on_destruction;
public:
ThreadStatus();
explicit ThreadStatus(bool check_current_thread_on_destruction_ = true);
~ThreadStatus();
ThreadGroupPtr getThreadGroup() const;

View File

@ -37,7 +37,7 @@ SipHash getHashOfLoadedBinary()
std::string getHashOfLoadedBinaryHex()
{
SipHash hash = getHashOfLoadedBinary();
std::array<UInt64, 2> checksum;
UInt128 checksum;
hash.get128(checksum);
return getHexUIntUppercase(checksum);
}
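
For context on the change above: getHexUIntUppercase applied to a UInt128 is assumed to produce the fixed-width hex of the high 64 bits followed by the low 64 bits, roughly as in this standard-library-only sketch (hexUInt128Uppercase is a hypothetical helper name):

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

std::string hexUInt128Uppercase(uint64_t high, uint64_t low)
{
    char buf[33];   // 32 hex digits plus the terminating NUL
    std::snprintf(buf, sizeof(buf), "%016llX%016llX",
                  static_cast<unsigned long long>(high),
                  static_cast<unsigned long long>(low));
    return std::string(buf, 32);
}
```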

View File

@ -49,8 +49,8 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
/// TODO: endianness is mixed up in this error message.
message << "Checksum doesn't match: corrupted data."
" Reference: " + getHexUIntLowercase(expected_checksum.high64) + getHexUIntLowercase(expected_checksum.low64)
+ ". Actual: " + getHexUIntLowercase(calculated_checksum.high64) + getHexUIntLowercase(calculated_checksum.low64)
" Reference: " + getHexUIntLowercase(expected_checksum)
+ ". Actual: " + getHexUIntLowercase(calculated_checksum)
+ ". Size of compressed block: " + toString(size);
const char * message_hardware_failure = "This is most likely due to hardware failure. "

View File

@ -1,18 +1,19 @@
#include <filesystem>
#include <Coordination/Changelog.h>
#include <Disks/DiskLocal.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/WriteHelpers.h>
#include <IO/ZstdDeflatingAppendableWriteBuffer.h>
#include <base/errnoToString.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <Common/filesystemHelpers.h>
#include <Common/Exception.h>
#include <Common/SipHash.h>
#include <Common/filesystemHelpers.h>
#include <Common/logger_useful.h>
#include <IO/WriteBufferFromFile.h>
#include <base/errnoToString.h>
#include <libnuraft/log_val_type.hxx>
@ -24,20 +25,41 @@ namespace ErrorCodes
extern const int CHECKSUM_DOESNT_MATCH;
extern const int CORRUPTED_DATA;
extern const int UNKNOWN_FORMAT_VERSION;
extern const int NOT_IMPLEMENTED;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}
namespace
{
constexpr std::string_view tmp_prefix = "tmp_";
void moveFileBetweenDisks(DiskPtr disk_from, ChangelogFileDescriptionPtr description, DiskPtr disk_to, const std::string & path_to)
{
/// we use an empty file with the tmp_ prefix to detect incomplete copies
/// if a copy is complete, it doesn't matter which disk the identical file is read from,
/// so it's okay if a failure happens after the tmp file is removed but before we remove
/// the changelog from the source disk
auto from_path = fs::path(description->path);
auto tmp_changelog_name = from_path.parent_path() / (std::string{tmp_prefix} + from_path.filename().string());
{
auto buf = disk_to->writeFile(tmp_changelog_name);
buf->finalize();
}
disk_from->copyFile(from_path, *disk_to, path_to, {});
disk_to->removeFile(tmp_changelog_name);
disk_from->removeFile(description->path);
description->path = path_to;
description->disk = disk_to;
}
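
A minimal sketch of the same tmp-marker idea using std::filesystem instead of ClickHouse's IDisk interface (moveFileWithMarker is a hypothetical name, and error handling is omitted): the empty marker is created before the copy and removed only once the copy is complete, so a crash in between leaves evidence that the destination file is incomplete.

```cpp
#include <filesystem>
#include <fstream>

namespace fs = std::filesystem;

void moveFileWithMarker(const fs::path & from, const fs::path & to)
{
    const fs::path marker = to.parent_path() / ("tmp_" + to.filename().string());
    { std::ofstream out(marker); }                                   // create the empty marker first
    fs::copy_file(from, to, fs::copy_options::overwrite_existing);   // then do the real copy
    fs::remove(marker);                                              // copy finished: drop the marker
    fs::remove(from);                                                // now it is safe to drop the source
}

// On startup, any remaining "tmp_<name>" means "<name>" may be incomplete, and both
// files can be removed, mirroring the cleanup done in load_from_disk further below.
```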
constexpr auto DEFAULT_PREFIX = "changelog";
std::string formatChangelogPath(
const std::string & prefix, const std::string & name_prefix, uint64_t from_index, uint64_t to_index, const std::string & extension)
inline std::string
formatChangelogPath(const std::string & name_prefix, uint64_t from_index, uint64_t to_index, const std::string & extension)
{
std::filesystem::path path(prefix);
path /= std::filesystem::path(fmt::format("{}_{}_{}.{}", name_prefix, from_index, to_index, extension));
return path;
return fmt::format("{}_{}_{}.{}", name_prefix, from_index, to_index, extension);
}
ChangelogFileDescriptionPtr getChangelogFileDescription(const std::filesystem::path & path)
@ -89,17 +111,19 @@ class ChangelogWriter
public:
ChangelogWriter(
std::map<uint64_t, ChangelogFileDescriptionPtr> & existing_changelogs_,
const std::filesystem::path & changelogs_dir_,
KeeperContextPtr keeper_context_,
LogFileSettings log_file_settings_)
: existing_changelogs(existing_changelogs_)
, log_file_settings(log_file_settings_)
, changelogs_dir(changelogs_dir_)
, keeper_context(std::move(keeper_context_))
, log(&Poco::Logger::get("Changelog"))
{
}
void setFile(ChangelogFileDescriptionPtr file_description, WriteMode mode)
{
auto disk = getDisk();
try
{
if (mode == WriteMode::Append && file_description->expectedEntriesCountInLog() != log_file_settings.rotate_interval)
@ -110,7 +134,7 @@ public:
file_description->expectedEntriesCountInLog());
// we have a file we need to finalize first
if (tryGetFileBuffer() && prealloc_done)
if (tryGetFileBaseBuffer() && prealloc_done)
{
finalizeCurrentFile();
@ -118,27 +142,55 @@ public:
// if we wrote at least 1 log in the log file we can rename the file to correctly reflect the
// contained logs
// the file can be deleted from disk earlier by compaction
if (!current_file_description->deleted && last_index_written
&& *last_index_written != current_file_description->to_log_index)
if (!current_file_description->deleted)
{
auto new_path = formatChangelogPath(
changelogs_dir,
auto log_disk = current_file_description->disk;
const auto & path = current_file_description->path;
std::string new_path = path;
if (last_index_written && *last_index_written != current_file_description->to_log_index)
{
new_path = formatChangelogPath(
current_file_description->prefix,
current_file_description->from_log_index,
*last_index_written,
current_file_description->extension);
std::filesystem::rename(current_file_description->path, new_path);
}
if (disk == log_disk)
{
if (path != new_path)
{
try
{
disk->moveFile(path, new_path);
}
catch (...)
{
tryLogCurrentException(log, fmt::format("File rename failed on disk {}", disk->getName()));
}
current_file_description->path = std::move(new_path);
}
}
else
{
moveFileBetweenDisks(log_disk, current_file_description, disk, new_path);
}
}
}
file_buf = std::make_unique<WriteBufferFromFile>(
file_description->path, DBMS_DEFAULT_BUFFER_SIZE, mode == WriteMode::Rewrite ? -1 : (O_APPEND | O_CREAT | O_WRONLY));
auto latest_log_disk = getLatestLogDisk();
assert(file_description->disk == latest_log_disk);
file_buf = latest_log_disk->writeFile(file_description->path, DBMS_DEFAULT_BUFFER_SIZE, mode);
assert(file_buf);
last_index_written.reset();
current_file_description = std::move(file_description);
if (log_file_settings.compress_logs)
compressed_buffer = std::make_unique<ZstdDeflatingAppendableWriteBuffer>(std::move(file_buf), /* compression level = */ 3, /* append_to_existing_file_ = */ mode == WriteMode::Append);
compressed_buffer = std::make_unique<ZstdDeflatingAppendableWriteBuffer>(
std::move(file_buf),
/* compression level = */ 3,
/* append_to_existing_file_ = */ mode == WriteMode::Append,
[latest_log_disk, path = current_file_description->path] { return latest_log_disk->readFile(path); });
prealloc_done = false;
}
@ -149,12 +201,12 @@ public:
}
}
bool isFileSet() const { return tryGetFileBuffer() != nullptr; }
/// Note: when compressed_buffer holds a value, ownership of file_buf has been transferred to compressed_buffer
bool isFileSet() const { return compressed_buffer != nullptr || file_buf != nullptr; }
bool appendRecord(ChangelogRecord && record)
{
const auto * file_buffer = tryGetFileBuffer();
const auto * file_buffer = tryGetFileBaseBuffer();
assert(file_buffer && current_file_description);
assert(record.header.index - getStartIndex() <= current_file_description->expectedEntriesCountInLog());
@ -211,7 +263,7 @@ public:
void flush()
{
auto * file_buffer = tryGetFileBuffer();
auto * file_buffer = tryGetFileBaseBuffer();
if (file_buffer)
{
/// Fsync file system if needed
@ -236,12 +288,12 @@ public:
new_description->from_log_index = new_start_log_index;
new_description->to_log_index = new_start_log_index + log_file_settings.rotate_interval - 1;
new_description->extension = "bin";
new_description->disk = getLatestLogDisk();
if (log_file_settings.compress_logs)
new_description->extension += "." + toContentEncodingName(CompressionMethod::Zstd);
new_description->path = formatChangelogPath(
changelogs_dir,
new_description->prefix,
new_start_log_index,
new_start_log_index + log_file_settings.rotate_interval - 1,
@ -260,17 +312,15 @@ public:
}
private:
void finalizeCurrentFile()
{
const auto * file_buffer = tryGetFileBuffer();
assert(file_buffer && prealloc_done);
assert(prealloc_done);
assert(current_file_description);
// compact can delete the file and we don't need to do anything
if (current_file_description->deleted)
{
LOG_WARNING(log, "Log {} is already deleted", file_buffer->getFileName());
LOG_WARNING(log, "Log {} is already deleted", current_file_description->path);
return;
}
@ -279,27 +329,36 @@ private:
flush();
if (log_file_settings.max_size != 0)
const auto * file_buffer = tryGetFileBuffer();
if (log_file_settings.max_size != 0 && file_buffer)
{
int res = -1;
do
{
res = ftruncate(file_buffer->getFD(), initial_file_size + file_buffer->count());
}
while (res < 0 && errno == EINTR);
} while (res < 0 && errno == EINTR);
if (res != 0)
LOG_WARNING(log, "Could not ftruncate file. Error: {}, errno: {}", errnoToString(), errno);
}
if (log_file_settings.compress_logs)
{
compressed_buffer.reset();
}
else
{
chassert(file_buf);
file_buf->finalize();
file_buf.reset();
}
}
WriteBuffer & getBuffer()
{
/// TODO: unify compressed_buffer and file_buf,
/// compressed_buffer can use its NestedBuffer directly if compress_logs=false
if (compressed_buffer)
return *compressed_buffer;
@ -319,38 +378,42 @@ private:
return *file_buffer;
}
const WriteBufferFromFile * tryGetFileBuffer() const
{
return const_cast<ChangelogWriter *>(this)->tryGetFileBuffer();
}
const WriteBufferFromFile * tryGetFileBuffer() const { return const_cast<ChangelogWriter *>(this)->tryGetFileBuffer(); }
WriteBufferFromFile * tryGetFileBuffer()
{
if (compressed_buffer)
return dynamic_cast<WriteBufferFromFile *>(compressed_buffer->getNestedBuffer());
if (file_buf)
return file_buf.get();
return dynamic_cast<WriteBufferFromFile *>(file_buf.get());
}
return nullptr;
WriteBufferFromFileBase * tryGetFileBaseBuffer()
{
if (compressed_buffer)
return dynamic_cast<WriteBufferFromFileBase *>(compressed_buffer->getNestedBuffer());
return file_buf.get();
}
void tryPreallocateForFile()
{
if (log_file_settings.max_size == 0)
const auto * file_buffer = tryGetFileBuffer();
if (log_file_settings.max_size == 0 || !file_buffer)
{
initial_file_size = 0;
prealloc_done = true;
return;
}
const auto & file_buffer = getFileBuffer();
#ifdef OS_LINUX
{
int res = -1;
do
{
res = fallocate(file_buffer.getFD(), FALLOC_FL_KEEP_SIZE, 0, log_file_settings.max_size + log_file_settings.overallocate_size);
res = fallocate(
file_buffer->getFD(), FALLOC_FL_KEEP_SIZE, 0, log_file_settings.max_size + log_file_settings.overallocate_size);
} while (res < 0 && errno == EINTR);
if (res != 0)
@ -365,15 +428,21 @@ private:
}
}
#endif
initial_file_size = getSizeFromFileDescriptor(file_buffer.getFD());
initial_file_size = getSizeFromFileDescriptor(file_buffer->getFD());
prealloc_done = true;
}
DiskPtr getLatestLogDisk() const { return keeper_context->getLatestLogDisk(); }
DiskPtr getDisk() const { return keeper_context->getLogDisk(); }
bool isLocalDisk() const { return dynamic_cast<DiskLocal *>(getDisk().get()) != nullptr; }
std::map<uint64_t, ChangelogFileDescriptionPtr> & existing_changelogs;
ChangelogFileDescriptionPtr current_file_description{nullptr};
std::unique_ptr<WriteBufferFromFile> file_buf;
std::unique_ptr<WriteBufferFromFileBase> file_buf;
std::optional<uint64_t> last_index_written;
size_t initial_file_size{0};
@ -383,7 +452,7 @@ private:
LogFileSettings log_file_settings;
const std::filesystem::path changelogs_dir;
KeeperContextPtr keeper_context;
Poco::Logger * const log;
};
@ -413,10 +482,10 @@ struct ChangelogReadResult
class ChangelogReader
{
public:
explicit ChangelogReader(const std::string & filepath_) : filepath(filepath_)
explicit ChangelogReader(DiskPtr disk_, const std::string & filepath_) : disk(disk_), filepath(filepath_)
{
auto compression_method = chooseCompressionMethod(filepath, "");
auto read_buffer_from_file = std::make_unique<ReadBufferFromFile>(filepath);
auto read_buffer_from_file = disk->readFile(filepath);
read_buf = wrapReadBufferWithCompressionMethod(std::move(read_buffer_from_file), compression_method);
}
@ -512,37 +581,103 @@ public:
}
private:
DiskPtr disk;
std::string filepath;
std::unique_ptr<ReadBuffer> read_buf;
};
Changelog::Changelog(
const std::string & changelogs_dir_,
Poco::Logger * log_,
LogFileSettings log_file_settings)
: changelogs_dir(changelogs_dir_)
, changelogs_detached_dir(changelogs_dir / "detached")
Changelog::Changelog(Poco::Logger * log_, LogFileSettings log_file_settings, KeeperContextPtr keeper_context_)
: changelogs_detached_dir("detached")
, rotate_interval(log_file_settings.rotate_interval)
, log(log_)
, write_operations(std::numeric_limits<size_t>::max())
, append_completion_queue(std::numeric_limits<size_t>::max())
, keeper_context(std::move(keeper_context_))
{
/// Load all files in changelog directory
namespace fs = std::filesystem;
if (!fs::exists(changelogs_dir))
fs::create_directories(changelogs_dir);
for (const auto & p : fs::directory_iterator(changelogs_dir))
if (auto latest_log_disk = getLatestLogDisk();
log_file_settings.force_sync && dynamic_cast<const DiskLocal *>(latest_log_disk.get()) == nullptr)
{
if (p == changelogs_detached_dir)
continue;
auto file_description = getChangelogFileDescription(p.path());
existing_changelogs[file_description->from_log_index] = std::move(file_description);
throw DB::Exception(
DB::ErrorCodes::BAD_ARGUMENTS,
"force_sync is set to true for logs but disk '{}' cannot satisfy such guarantee because it's not of type DiskLocal.\n"
"If you want to use force_sync and same disk for all logs, please set keeper_server.log_storage_disk to a local disk.\n"
"If you want to use force_sync and different disk only for old logs, please set 'keeper_server.log_storage_disk' to any "
"supported disk and 'keeper_server.latest_log_storage_disk' to a local disk.\n"
"Otherwise, disable force_sync",
latest_log_disk->getName());
}
/// Load all files on changelog disks
const auto load_from_disk = [&](const auto & disk)
{
LOG_TRACE(log, "Reading from disk {}", disk->getName());
std::unordered_map<std::string, std::string> incomplete_files;
const auto clean_incomplete_file = [&](const auto & file_path)
{
if (auto incomplete_it = incomplete_files.find(fs::path(file_path).filename()); incomplete_it != incomplete_files.end())
{
LOG_TRACE(log, "Removing {} from {}", file_path, disk->getName());
disk->removeFile(file_path);
disk->removeFile(incomplete_it->second);
incomplete_files.erase(incomplete_it);
return true;
}
return false;
};
std::vector<std::string> changelog_files;
for (auto it = disk->iterateDirectory(""); it->isValid(); it->next())
{
if (it->name() == changelogs_detached_dir)
continue;
if (it->name().starts_with(tmp_prefix))
{
incomplete_files.emplace(it->name().substr(tmp_prefix.size()), it->path());
continue;
}
if (clean_incomplete_file(it->path()))
continue;
changelog_files.push_back(it->path());
}
for (const auto & changelog_file : changelog_files)
{
if (clean_incomplete_file(fs::path(changelog_file).filename()))
continue;
auto file_description = getChangelogFileDescription(changelog_file);
file_description->disk = disk;
LOG_TRACE(log, "Found {} on {}", changelog_file, disk->getName());
auto [changelog_it, inserted] = existing_changelogs.insert_or_assign(file_description->from_log_index, std::move(file_description));
if (!inserted)
LOG_WARNING(log, "Found duplicate entries for {}, will use the entry from {}", changelog_it->second->path, disk->getName());
}
for (const auto & [name, path] : incomplete_files)
disk->removeFile(path);
};
/// Load all files from old disks
for (const auto & disk : keeper_context->getOldLogDisks())
load_from_disk(disk);
auto disk = getDisk();
load_from_disk(disk);
auto latest_log_disk = getLatestLogDisk();
if (disk != latest_log_disk)
load_from_disk(latest_log_disk);
if (existing_changelogs.empty())
LOG_WARNING(log, "No logs exists in {}. It's Ok if it's the first run of clickhouse-keeper.", changelogs_dir.generic_string());
LOG_WARNING(log, "No logs exists in {}. It's Ok if it's the first run of clickhouse-keeper.", disk->getPath());
clean_log_thread = ThreadFromGlobalPool([this] { cleanLogThread(); });
@ -550,8 +685,7 @@ Changelog::Changelog(
append_completion_thread = ThreadFromGlobalPool([this] { appendCompletionThread(); });
current_writer = std::make_unique<ChangelogWriter>(
existing_changelogs, changelogs_dir, log_file_settings);
current_writer = std::make_unique<ChangelogWriter>(existing_changelogs, keeper_context, log_file_settings);
}
void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uint64_t logs_to_keep)
@ -623,7 +757,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
break;
}
ChangelogReader reader(changelog_description.path);
ChangelogReader reader(changelog_description.disk, changelog_description.path);
last_log_read_result = reader.readChangelog(logs, start_to_read_from, log);
last_log_read_result->log_start_index = changelog_description.from_log_index;
@ -684,13 +818,13 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
assert(existing_changelogs.find(last_log_read_result->log_start_index) != existing_changelogs.end());
assert(existing_changelogs.find(last_log_read_result->log_start_index)->first == existing_changelogs.rbegin()->first);
/// Continue to write into incomplete existing log if it doesn't finished with error
/// Continue to write into incomplete existing log if it didn't finish with error
const auto & description = existing_changelogs[last_log_read_result->log_start_index];
if (last_log_read_result->last_read_index == 0 || last_log_read_result->error) /// If it's a broken log then remove it
{
LOG_INFO(log, "Removing changelog {} because it's empty or read finished with error", description->path);
std::filesystem::remove(description->path);
description->disk->removeFile(description->path);
existing_changelogs.erase(last_log_read_result->log_start_index);
std::erase_if(logs, [last_log_read_result](const auto & item) { return item.first >= last_log_read_result->log_start_index; });
}
@ -699,26 +833,67 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
initWriter(description);
}
}
else if (last_log_read_result.has_value())
{
/// check if we need to move completed log to another disk
auto latest_log_disk = getLatestLogDisk();
auto disk = getDisk();
auto & description = existing_changelogs.at(last_log_read_result->log_start_index);
if (latest_log_disk != disk && latest_log_disk == description->disk)
moveFileBetweenDisks(latest_log_disk, description, disk, description->path);
}
/// Start new log if we don't initialize writer from previous log. All logs can be "complete".
if (!current_writer->isFileSet())
current_writer->rotate(max_log_id + 1);
/// Move files to correct disks
auto latest_start_index = current_writer->getStartIndex();
auto latest_log_disk = getLatestLogDisk();
auto disk = getDisk();
for (const auto & [start_index, description] : existing_changelogs)
{
/// latest log should already be on latest_log_disk
if (start_index == latest_start_index)
{
chassert(description->disk == latest_log_disk);
continue;
}
if (description->disk != disk)
moveFileBetweenDisks(description->disk, description, disk, description->path);
}
initialized = true;
}
void Changelog::initWriter(ChangelogFileDescriptionPtr description)
{
if (description->expectedEntriesCountInLog() != rotate_interval)
LOG_TRACE(
log,
"Looks like rotate_logs_interval was changed, current {}, expected entries in last log {}",
rotate_interval,
description->expectedEntriesCountInLog());
LOG_TRACE(log, "Continue to write into {}", description->path);
auto log_disk = description->disk;
auto latest_log_disk = getLatestLogDisk();
if (log_disk != latest_log_disk)
moveFileBetweenDisks(log_disk, description, latest_log_disk, description->path);
current_writer->setFile(std::move(description), WriteMode::Append);
}
namespace
{
std::string getCurrentTimestampFolder()
{
std::string getCurrentTimestampFolder()
{
const auto timestamp = LocalDateTime{std::time(nullptr)};
return fmt::format(
"{:02}{:02}{:02}T{:02}{:02}{:02}",
@ -728,26 +903,54 @@ std::string getCurrentTimestampFolder()
timestamp.hour(),
timestamp.minute(),
timestamp.second());
}
}
DiskPtr Changelog::getDisk() const
{
return keeper_context->getLogDisk();
}
DiskPtr Changelog::getLatestLogDisk() const
{
return keeper_context->getLatestLogDisk();
}
void Changelog::removeExistingLogs(ChangelogIter begin, ChangelogIter end)
{
const auto timestamp_folder = changelogs_detached_dir / getCurrentTimestampFolder();
auto disk = getDisk();
const auto timestamp_folder = (fs::path(changelogs_detached_dir) / getCurrentTimestampFolder()).generic_string();
for (auto itr = begin; itr != end;)
{
if (!std::filesystem::exists(timestamp_folder))
if (!disk->exists(timestamp_folder))
{
LOG_WARNING(log, "Moving broken logs to {}", timestamp_folder.generic_string());
std::filesystem::create_directories(timestamp_folder);
LOG_WARNING(log, "Moving broken logs to {}", timestamp_folder);
disk->createDirectories(timestamp_folder);
}
LOG_WARNING(log, "Removing changelog {}", itr->second->path);
const std::filesystem::path & path = itr->second->path;
const auto new_path = timestamp_folder / path.filename();
std::filesystem::rename(path, new_path);
auto changelog_disk = itr->second->disk;
if (changelog_disk == disk)
{
try
{
disk->moveFile(path.generic_string(), new_path.generic_string());
}
catch (const DB::Exception & e)
{
if (e.code() == DB::ErrorCodes::NOT_IMPLEMENTED)
moveFileBetweenDisks(changelog_disk, itr->second, disk, new_path);
}
}
else
moveFileBetweenDisks(changelog_disk, itr->second, disk, new_path);
itr = existing_changelogs.erase(itr);
}
}
@ -882,7 +1085,6 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Changelog must be initialized before writing records");
{
std::lock_guard lock(writer_mutex);
/// This write_at requires overwriting everything in this file and also in previous file(s)
const bool go_to_previous_file = index < current_writer->getStartIndex();
@ -898,13 +1100,18 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
else
description = std::prev(index_changelog)->second;
auto log_disk = description->disk;
auto latest_log_disk = getLatestLogDisk();
if (log_disk != latest_log_disk)
moveFileBetweenDisks(log_disk, description, latest_log_disk, description->path);
current_writer->setFile(std::move(description), WriteMode::Append);
/// Remove all subsequent files if overwritten something in previous one
auto to_remove_itr = existing_changelogs.upper_bound(index);
for (auto itr = to_remove_itr; itr != existing_changelogs.end();)
{
std::filesystem::remove(itr->second->path);
itr->second->disk->removeFile(itr->second->path);
itr = existing_changelogs.erase(itr);
}
}
@ -954,14 +1161,22 @@ void Changelog::compact(uint64_t up_to_log_index)
LOG_INFO(log, "Removing changelog {} because of compaction", changelog_description.path);
/// If we failed to push to the queue for background removal, then we will remove it now
if (!log_files_to_delete_queue.tryPush(changelog_description.path, 1))
if (!log_files_to_delete_queue.tryPush({changelog_description.path, changelog_description.disk}, 1))
{
std::error_code ec;
std::filesystem::remove(changelog_description.path, ec);
if (ec)
LOG_WARNING(log, "Failed to remove changelog {} in compaction, error message: {}", changelog_description.path, ec.message());
else
LOG_INFO(log, "Removed changelog {} because of compaction", changelog_description.path);
try
{
changelog_description.disk->removeFile(changelog_description.path);
LOG_INFO(log, "Removed changelog {} because of compaction.", changelog_description.path);
}
catch (Exception & e)
{
LOG_WARNING(
log, "Failed to remove changelog {} in compaction, error message: {}", changelog_description.path, e.message());
}
catch (...)
{
tryLogCurrentException(log);
}
}
changelog_description.deleted = true;
@ -1151,14 +1366,23 @@ Changelog::~Changelog()
void Changelog::cleanLogThread()
{
std::string path;
while (log_files_to_delete_queue.pop(path))
std::pair<std::string, DiskPtr> path_with_disk;
while (log_files_to_delete_queue.pop(path_with_disk))
{
std::error_code ec;
if (std::filesystem::remove(path, ec))
const auto & [path, disk] = path_with_disk;
try
{
disk->removeFile(path);
LOG_INFO(log, "Removed changelog {} because of compaction.", path);
else
LOG_WARNING(log, "Failed to remove changelog {} in compaction, error message: {}", path, ec.message());
}
catch (Exception & e)
{
LOG_WARNING(log, "Failed to remove changelog {} in compaction, error message: {}", path, e.message());
}
catch (...)
{
tryLogCurrentException(log);
}
}
}
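
The compaction and cleanup changes above push (path, disk) pairs through a bounded queue and fall back to deleting inline when the queue is full. Below is a generic, standard-library-only sketch of that producer/consumer shape (BoundedDeleteQueue and cleanThread are hypothetical names, not ClickHouse's ConcurrentBoundedQueue API):

```cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <optional>
#include <queue>
#include <string>

class BoundedDeleteQueue
{
public:
    explicit BoundedDeleteQueue(size_t cap) : capacity(cap) {}

    bool tryPush(std::string path)      // returns false when full (caller deletes inline)
    {
        std::lock_guard lock(mutex);
        if (closed || items.size() >= capacity)
            return false;
        items.push(std::move(path));
        cv.notify_one();
        return true;
    }

    std::optional<std::string> pop()    // blocks; an empty optional means "shutting down"
    {
        std::unique_lock lock(mutex);
        cv.wait(lock, [&] { return closed || !items.empty(); });
        if (items.empty())
            return std::nullopt;
        auto path = std::move(items.front());
        items.pop();
        return path;
    }

    void close()
    {
        std::lock_guard lock(mutex);
        closed = true;
        cv.notify_all();
    }

private:
    const size_t capacity;
    std::mutex mutex;
    std::condition_variable cv;
    std::queue<std::string> items;
    bool closed = false;
};

// Worker loop, analogous to cleanLogThread: remove files until the queue is closed.
void cleanThread(BoundedDeleteQueue & queue)
{
    while (auto path = queue.pop())
        std::remove(path->c_str());     // errors would be logged in the real code
}
```

An owner of such a queue would construct it with a small capacity (the comment above uses 128), spawn the worker, and call close() before joining the worker thread in its destructor.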

View File

@ -11,6 +11,7 @@
#include <libnuraft/raft_server.hxx>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ThreadPool.h>
#include <Coordination/KeeperContext.h>
namespace DB
{
@ -59,6 +60,7 @@ struct ChangelogFileDescription
uint64_t to_log_index;
std::string extension;
DiskPtr disk;
std::string path;
bool deleted = false;
@ -87,9 +89,9 @@ class Changelog
{
public:
Changelog(
const std::string & changelogs_dir_,
Poco::Logger * log_,
LogFileSettings log_file_settings);
LogFileSettings log_file_settings,
KeeperContextPtr keeper_context_);
Changelog(Changelog &&) = delete;
@ -152,6 +154,9 @@ private:
/// Pack log_entry into changelog record
static ChangelogRecord buildRecord(uint64_t index, const LogEntryPtr & log_entry);
DiskPtr getDisk() const;
DiskPtr getLatestLogDisk() const;
/// Currently existing changelogs
std::map<uint64_t, ChangelogFileDescriptionPtr> existing_changelogs;
@ -169,8 +174,7 @@ private:
/// Clean useless log files in a background thread
void cleanLogThread();
const std::filesystem::path changelogs_dir;
const std::filesystem::path changelogs_detached_dir;
const String changelogs_detached_dir;
const uint64_t rotate_interval;
Poco::Logger * log;
@ -185,7 +189,7 @@ private:
uint64_t max_log_id = 0;
/// For compaction, queue of delete not used logs
/// 128 is enough, even if log is not removed, it's not a problem
ConcurrentBoundedQueue<std::string> log_files_to_delete_queue{128};
ConcurrentBoundedQueue<std::pair<std::string, DiskPtr>> log_files_to_delete_queue{128};
ThreadFromGlobalPool clean_log_thread;
struct AppendLog
@ -223,6 +227,8 @@ private:
nuraft::wptr<nuraft::raft_server> raft_server;
KeeperContextPtr keeper_context;
bool initialized = false;
};

View File

@ -85,14 +85,6 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
writeText(four_letter_word_allow_list, buf);
buf.write('\n');
writeText("log_storage_path=", buf);
writeText(log_storage_path, buf);
buf.write('\n');
writeText("snapshot_storage_path=", buf);
writeText(snapshot_storage_path, buf);
buf.write('\n');
/// coordination_settings
writeText("max_requests_batch_size=", buf);
@ -188,61 +180,9 @@ KeeperConfigurationAndSettings::loadFromConfig(const Poco::Util::AbstractConfigu
DEFAULT_FOUR_LETTER_WORD_CMD));
ret->log_storage_path = getLogsPathFromConfig(config, standalone_keeper_);
ret->snapshot_storage_path = getSnapshotsPathFromConfig(config, standalone_keeper_);
ret->state_file_path = getStateFilePathFromConfig(config, standalone_keeper_);
ret->coordination_settings->loadFromConfig("keeper_server.coordination_settings", config);
return ret;
}
String KeeperConfigurationAndSettings::getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_)
{
/// the most specialized path
if (config.has("keeper_server.log_storage_path"))
return config.getString("keeper_server.log_storage_path");
if (config.has("keeper_server.storage_path"))
return std::filesystem::path{config.getString("keeper_server.storage_path")} / "logs";
if (standalone_keeper_)
return std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "logs";
else
return std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/logs";
}
String KeeperConfigurationAndSettings::getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_)
{
/// the most specialized path
if (config.has("keeper_server.snapshot_storage_path"))
return config.getString("keeper_server.snapshot_storage_path");
if (config.has("keeper_server.storage_path"))
return std::filesystem::path{config.getString("keeper_server.storage_path")} / "snapshots";
if (standalone_keeper_)
return std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "snapshots";
else
return std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/snapshots";
}
String KeeperConfigurationAndSettings::getStateFilePathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_)
{
if (config.has("keeper_server.storage_path"))
return std::filesystem::path{config.getString("keeper_server.storage_path")} / "state";
if (config.has("keeper_server.snapshot_storage_path"))
return std::filesystem::path(config.getString("keeper_server.snapshot_storage_path")).parent_path() / "state";
if (config.has("keeper_server.log_storage_path"))
return std::filesystem::path(config.getString("keeper_server.log_storage_path")).parent_path() / "state";
if (standalone_keeper_)
return std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "state";
else
return std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/state";
}
}

View File

@ -82,17 +82,8 @@ struct KeeperConfigurationAndSettings
bool standalone_keeper;
CoordinationSettingsPtr coordination_settings;
String log_storage_path;
String snapshot_storage_path;
String state_file_path;
void dump(WriteBufferFromOwnString & buf) const;
static std::shared_ptr<KeeperConfigurationAndSettings> loadFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_);
private:
static String getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_);
static String getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_);
static String getStateFilePathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper_);
};
using KeeperConfigurationAndSettingsPtr = std::shared_ptr<KeeperConfigurationAndSettings>;

View File

@ -297,6 +297,7 @@ String ConfCommand::run()
StringBuffer buf;
keeper_dispatcher.getKeeperConfigurationAndSettings()->dump(buf);
keeper_dispatcher.getKeeperContext()->dumpConfiguration(buf);
return buf.str();
}
@ -542,7 +543,7 @@ String CleanResourcesCommand::run()
String FeatureFlagsCommand::run()
{
const auto & feature_flags = keeper_dispatcher.getKeeperContext()->feature_flags;
const auto & feature_flags = keeper_dispatcher.getKeeperContext()->getFeatureFlags();
StringBuffer ret;

View File

@ -28,8 +28,8 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
size_t zxid = 0;
size_t session_with_watches = 0;
size_t paths_watched = 0;
size_t snapshot_dir_size = 0;
size_t log_dir_size = 0;
//size_t snapshot_dir_size = 0;
//size_t log_dir_size = 0;
if (keeper_dispatcher.isServerActive())
{
@ -49,8 +49,8 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
latest_snapshot_size = state_machine.getLatestSnapshotBufSize();
session_with_watches = state_machine.getSessionsWithWatchesCount();
paths_watched = state_machine.getWatchedPathsCount();
snapshot_dir_size = keeper_dispatcher.getSnapDirSize();
log_dir_size = keeper_dispatcher.getLogDirSize();
//snapshot_dir_size = keeper_dispatcher.getSnapDirSize();
//log_dir_size = keeper_dispatcher.getLogDirSize();
# if defined(__linux__) || defined(__APPLE__)
open_file_descriptor_count = getCurrentProcessFDCount();
@ -85,8 +85,8 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
new_values["KeeperZxid"] = { zxid, "The current transaction id number (zxid) in ClickHouse Keeper." };
new_values["KeeperSessionWithWatches"] = { session_with_watches, "The number of client sessions of ClickHouse Keeper having watches." };
new_values["KeeperPathsWatched"] = { paths_watched, "The number of different paths watched by the clients of ClickHouse Keeper." };
new_values["KeeperSnapshotDirSize"] = { snapshot_dir_size, "The size of the snapshots directory of ClickHouse Keeper, in bytes." };
new_values["KeeperLogDirSize"] = { log_dir_size, "The size of the logs directory of ClickHouse Keeper, in bytes." };
//new_values["KeeperSnapshotDirSize"] = { snapshot_dir_size, "The size of the snapshots directory of ClickHouse Keeper, in bytes." };
//new_values["KeeperLogDirSize"] = { log_dir_size, "The size of the logs directory of ClickHouse Keeper, in bytes." };
auto keeper_log_info = keeper_dispatcher.getKeeperLogInfo();
@ -108,8 +108,8 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
}
KeeperAsynchronousMetrics::KeeperAsynchronousMetrics(
TinyContextPtr tiny_context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
: AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_), tiny_context(std::move(tiny_context_))
ContextPtr context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
: AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_), context(std::move(context_))
{
}
@ -117,7 +117,7 @@ void KeeperAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
{
#if USE_NURAFT
{
auto keeper_dispatcher = tiny_context->tryGetKeeperDispatcher();
auto keeper_dispatcher = context->tryGetKeeperDispatcher();
if (keeper_dispatcher)
updateKeeperInformation(*keeper_dispatcher, new_values);
}

View File

@ -1,6 +1,6 @@
#pragma once
#include <Coordination/TinyContext.h>
#include <Interpreters/Context.h>
#include <Common/AsynchronousMetrics.h>
namespace DB
@ -13,10 +13,10 @@ class KeeperAsynchronousMetrics : public AsynchronousMetrics
{
public:
KeeperAsynchronousMetrics(
TinyContextPtr tiny_context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
ContextPtr context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
private:
TinyContextPtr tiny_context;
ContextPtr context;
void updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) override;
};

View File

@ -1,4 +1,9 @@
#include <Coordination/KeeperContext.h>
#include <Coordination/Defines.h>
#include <Disks/DiskLocal.h>
#include <Interpreters/Context.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Coordination/KeeperConstants.h>
#include <Common/logger_useful.h>
#include <Coordination/KeeperFeatureFlags.h>
@ -14,14 +19,15 @@ extern const int BAD_ARGUMENTS;
}
KeeperContext::KeeperContext()
KeeperContext::KeeperContext(bool standalone_keeper_)
: disk_selector(std::make_shared<DiskSelector>())
, standalone_keeper(standalone_keeper_)
{
/// enable by default some feature flags
feature_flags.enableFeatureFlag(KeeperFeatureFlag::FILTERED_LIST);
feature_flags.enableFeatureFlag(KeeperFeatureFlag::MULTI_READ);
system_nodes_with_data[keeper_api_feature_flags_path] = feature_flags.getFeatureFlags();
/// for older clients, the default is equivalent to WITH_MULTI_READ version
system_nodes_with_data[keeper_api_version_path] = toString(static_cast<uint8_t>(KeeperApiVersion::WITH_MULTI_READ));
}
@ -31,6 +37,264 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config)
digest_enabled = config.getBool("keeper_server.digest_enabled", false);
ignore_system_path_on_startup = config.getBool("keeper_server.ignore_system_path_on_startup", false);
initializeFeatureFlags(config);
initializeDisks(config);
}
void KeeperContext::initializeDisks(const Poco::Util::AbstractConfiguration & config)
{
disk_selector->initialize(config, "storage_configuration.disks", Context::getGlobalContextInstance());
log_storage = getLogsPathFromConfig(config);
if (config.has("keeper_server.latest_log_storage_disk"))
latest_log_storage = config.getString("keeper_server.latest_log_storage_disk");
else
latest_log_storage = log_storage;
const auto collect_old_disk_names = [&](const std::string_view key_prefix, std::vector<std::string> & disk_names)
{
Poco::Util::AbstractConfiguration::Keys disk_name_keys;
config.keys("keeper_server", disk_name_keys);
for (const auto & key : disk_name_keys)
{
if (key.starts_with(key_prefix))
disk_names.push_back(config.getString(fmt::format("keeper_server.{}", key)));
}
};
collect_old_disk_names("old_log_storage_disk", old_log_disk_names);
collect_old_disk_names("old_snapshot_storage_disk", old_snapshot_disk_names);
snapshot_storage = getSnapshotsPathFromConfig(config);
if (config.has("keeper_server.latest_snapshot_storage_disk"))
latest_snapshot_storage = config.getString("keeper_server.latest_snapshot_storage_disk");
else
latest_snapshot_storage = snapshot_storage;
state_file_storage = getStatePathFromConfig(config);
}
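
Several old log/snapshot disks can be declared by repeating keys that share a prefix (old_log_storage_disk, old_log_storage_disk_2, ...), which is what the collect_old_disk_names lambda above scans for. A standalone sketch of that prefix scan over a hypothetical flat key/value list, rather than the real Poco configuration:

#include <string>
#include <string_view>
#include <utility>
#include <vector>

// Hypothetical flattened view of the keys under <keeper_server>; not the Poco API.
using ConfigKeys = std::vector<std::pair<std::string, std::string>>;

std::vector<std::string> collectDiskNamesWithPrefix(const ConfigKeys & keys, std::string_view prefix)
{
    std::vector<std::string> disk_names;
    for (const auto & [key, value] : keys)
        if (key.starts_with(prefix))        // e.g. old_log_storage_disk, old_log_storage_disk_2, ...
            disk_names.push_back(value);
    return disk_names;
}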
KeeperContext::Phase KeeperContext::getServerState() const
{
return server_state;
}
void KeeperContext::setServerState(KeeperContext::Phase server_state_)
{
server_state = server_state_;
}
bool KeeperContext::ignoreSystemPathOnStartup() const
{
return ignore_system_path_on_startup;
}
bool KeeperContext::digestEnabled() const
{
return digest_enabled;
}
void KeeperContext::setDigestEnabled(bool digest_enabled_)
{
digest_enabled = digest_enabled_;
}
DiskPtr KeeperContext::getDisk(const Storage & storage) const
{
if (const auto * storage_disk = std::get_if<DiskPtr>(&storage))
return *storage_disk;
const auto & disk_name = std::get<std::string>(storage);
return disk_selector->get(disk_name);
}
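
Here Storage (defined in the header further down) is a std::variant<DiskPtr, std::string>: either a disk created directly for a configured path, or the name of a disk declared under storage_configuration and resolved through the DiskSelector. A minimal standalone sketch of the same resolution, with hypothetical stand-in types instead of the real IDisk/DiskSelector:

#include <functional>
#include <memory>
#include <string>
#include <variant>

// Hypothetical stand-ins used only for illustration.
struct Disk { std::string name; };
using DiskPtr = std::shared_ptr<Disk>;
using Storage = std::variant<DiskPtr, std::string>;

DiskPtr resolveDisk(const Storage & storage, const std::function<DiskPtr(const std::string &)> & lookup)
{
    if (const auto * disk = std::get_if<DiskPtr>(&storage))
        return *disk;                                  // already a concrete disk (path-based config)
    return lookup(std::get<std::string>(storage));     // a disk name, resolved through the selector
}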
DiskPtr KeeperContext::getLogDisk() const
{
return getDisk(log_storage);
}
std::vector<DiskPtr> KeeperContext::getOldLogDisks() const
{
std::vector<DiskPtr> old_log_disks;
old_log_disks.reserve(old_log_disk_names.size());
for (const auto & disk_name : old_log_disk_names)
old_log_disks.push_back(disk_selector->get(disk_name));
return old_log_disks;
}
DiskPtr KeeperContext::getLatestLogDisk() const
{
return getDisk(latest_log_storage);
}
void KeeperContext::setLogDisk(DiskPtr disk)
{
log_storage = disk;
latest_log_storage = std::move(disk);
}
DiskPtr KeeperContext::getLatestSnapshotDisk() const
{
return getDisk(latest_snapshot_storage);
}
DiskPtr KeeperContext::getSnapshotDisk() const
{
return getDisk(snapshot_storage);
}
std::vector<DiskPtr> KeeperContext::getOldSnapshotDisks() const
{
std::vector<DiskPtr> old_snapshot_disks;
old_snapshot_disks.reserve(old_snapshot_disk_names.size());
for (const auto & disk_name : old_snapshot_disk_names)
old_snapshot_disks.push_back(disk_selector->get(disk_name));
return old_snapshot_disks;
}
void KeeperContext::setSnapshotDisk(DiskPtr disk)
{
snapshot_storage = std::move(disk);
latest_snapshot_storage = snapshot_storage;
}
DiskPtr KeeperContext::getStateFileDisk() const
{
return getDisk(state_file_storage);
}
void KeeperContext::setStateFileDisk(DiskPtr disk)
{
state_file_storage = std::move(disk);
}
const std::unordered_map<std::string, std::string> & KeeperContext::getSystemNodesWithData() const
{
return system_nodes_with_data;
}
const KeeperFeatureFlags & KeeperContext::getFeatureFlags() const
{
return feature_flags;
}
void KeeperContext::dumpConfiguration(WriteBufferFromOwnString & buf) const
{
auto dump_disk_info = [&](const std::string_view prefix, const IDisk & disk)
{
writeText(fmt::format("{}_path=", prefix), buf);
writeText(disk.getPath(), buf);
buf.write('\n');
writeText(fmt::format("{}_disk=", prefix), buf);
writeText(disk.getName(), buf);
buf.write('\n');
};
{
auto log_disk = getDisk(log_storage);
dump_disk_info("log_storage", *log_disk);
auto latest_log_disk = getDisk(latest_log_storage);
if (log_disk != latest_log_disk)
dump_disk_info("latest_log_storage", *latest_log_disk);
}
{
auto snapshot_disk = getDisk(snapshot_storage);
dump_disk_info("snapshot_storage", *snapshot_disk);
}
}
KeeperContext::Storage KeeperContext::getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const
{
const auto create_local_disk = [](const auto & path)
{
if (!fs::exists(path))
fs::create_directories(path);
return std::make_shared<DiskLocal>("LocalLogDisk", path, 0);
};
/// the most specialized path
if (config.has("keeper_server.log_storage_path"))
return create_local_disk(config.getString("keeper_server.log_storage_path"));
if (config.has("keeper_server.log_storage_disk"))
return config.getString("keeper_server.log_storage_disk");
if (config.has("keeper_server.storage_path"))
return create_local_disk(std::filesystem::path{config.getString("keeper_server.storage_path")} / "logs");
if (standalone_keeper)
return create_local_disk(std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "logs");
else
return create_local_disk(std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/logs");
}
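
For reference, a compact sketch of the precedence implemented above, written against a hypothetical flattened config struct instead of Poco::Util::AbstractConfiguration: an explicit log_storage_path wins, then a named log_storage_disk, then the shared storage_path, and finally the server-wide default path.

#include <filesystem>
#include <optional>
#include <string>

// Hypothetical flattened view of the relevant config keys; not the real Poco interface.
struct KeeperPathConfig
{
    std::optional<std::string> log_storage_path;   // keeper_server.log_storage_path
    std::optional<std::string> log_storage_disk;   // keeper_server.log_storage_disk
    std::optional<std::string> storage_path;       // keeper_server.storage_path
    std::string base_path;                         // <path> from the server config
};

// Either a local path (a DiskLocal is created for it) or the name of a preconfigured disk.
struct ResolvedStorage
{
    bool is_disk_name = false;
    std::string value;
};

ResolvedStorage resolveLogStorage(const KeeperPathConfig & cfg, bool standalone_keeper)
{
    namespace fs = std::filesystem;
    if (cfg.log_storage_path)
        return {false, *cfg.log_storage_path};                               // explicit local path wins
    if (cfg.log_storage_disk)
        return {true, *cfg.log_storage_disk};                                // named disk from storage_configuration
    if (cfg.storage_path)
        return {false, (fs::path(*cfg.storage_path) / "logs").string()};     // shared Keeper storage path
    const auto suffix = standalone_keeper ? "logs" : "coordination/logs";
    return {false, (fs::path(cfg.base_path) / suffix).string()};
}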
KeeperContext::Storage KeeperContext::getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const
{
const auto create_local_disk = [](const auto & path)
{
if (!fs::exists(path))
fs::create_directories(path);
return std::make_shared<DiskLocal>("LocalSnapshotDisk", path, 0);
};
/// the most specialized path
if (config.has("keeper_server.snapshot_storage_path"))
return create_local_disk(config.getString("keeper_server.snapshot_storage_path"));
if (config.has("keeper_server.snapshot_storage_disk"))
return config.getString("keeper_server.snapshot_storage_disk");
if (config.has("keeper_server.storage_path"))
return create_local_disk(std::filesystem::path{config.getString("keeper_server.storage_path")} / "snapshots");
if (standalone_keeper)
return create_local_disk(std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "snapshots");
else
return create_local_disk(std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/snapshots");
}
KeeperContext::Storage KeeperContext::getStatePathFromConfig(const Poco::Util::AbstractConfiguration & config) const
{
const auto create_local_disk = [](const auto & path)
{
if (!fs::exists(path))
fs::create_directories(path);
return std::make_shared<DiskLocal>("LocalStateFileDisk", path, 0);
};
if (config.has("keeper_server.state_storage_disk"))
return config.getString("keeper_server.state_storage_disk");
if (config.has("keeper_server.storage_path"))
return create_local_disk(std::filesystem::path{config.getString("keeper_server.storage_path")});
if (config.has("keeper_server.snapshot_storage_path"))
return create_local_disk(std::filesystem::path(config.getString("keeper_server.snapshot_storage_path")).parent_path());
if (config.has("keeper_server.log_storage_path"))
return create_local_disk(std::filesystem::path(config.getString("keeper_server.log_storage_path")).parent_path());
if (standalone_keeper)
return create_local_disk(std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)});
else
return create_local_disk(std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination");
}
void KeeperContext::initializeFeatureFlags(const Poco::Util::AbstractConfiguration & config)
{
static const std::string feature_flags_key = "keeper_server.feature_flags";
if (config.has(feature_flags_key))
{

View File

@ -1,16 +1,21 @@
#pragma once
#include <Poco/Util/AbstractConfiguration.h>
#include <Coordination/KeeperFeatureFlags.h>
#include <IO/WriteBufferFromString.h>
#include <Disks/DiskSelector.h>
#include <cstdint>
#include <memory>
namespace DB
{
struct KeeperContext
class KeeperContext
{
KeeperContext();
void initialize(const Poco::Util::AbstractConfiguration & config);
public:
explicit KeeperContext(bool standalone_keeper_);
enum class Phase : uint8_t
{
@ -19,11 +24,64 @@ struct KeeperContext
SHUTDOWN
};
void initialize(const Poco::Util::AbstractConfiguration & config);
Phase getServerState() const;
void setServerState(Phase server_state_);
bool ignoreSystemPathOnStartup() const;
bool digestEnabled() const;
void setDigestEnabled(bool digest_enabled_);
DiskPtr getLatestLogDisk() const;
DiskPtr getLogDisk() const;
std::vector<DiskPtr> getOldLogDisks() const;
void setLogDisk(DiskPtr disk);
DiskPtr getLatestSnapshotDisk() const;
DiskPtr getSnapshotDisk() const;
std::vector<DiskPtr> getOldSnapshotDisks() const;
void setSnapshotDisk(DiskPtr disk);
DiskPtr getStateFileDisk() const;
void setStateFileDisk(DiskPtr disk);
const std::unordered_map<std::string, std::string> & getSystemNodesWithData() const;
const KeeperFeatureFlags & getFeatureFlags() const;
void dumpConfiguration(WriteBufferFromOwnString & buf) const;
private:
/// Local disk defined by a path, or the name of a disk from the server config
using Storage = std::variant<DiskPtr, std::string>;
void initializeFeatureFlags(const Poco::Util::AbstractConfiguration & config);
void initializeDisks(const Poco::Util::AbstractConfiguration & config);
Storage getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
Storage getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
Storage getStatePathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
DiskPtr getDisk(const Storage & storage) const;
Phase server_state{Phase::INIT};
bool ignore_system_path_on_startup{false};
bool digest_enabled{true};
std::shared_ptr<DiskSelector> disk_selector;
Storage log_storage;
Storage latest_log_storage;
Storage snapshot_storage;
Storage latest_snapshot_storage;
Storage state_file_storage;
std::vector<std::string> old_log_disk_names;
std::vector<std::string> old_snapshot_disk_names;
bool standalone_keeper;
std::unordered_map<std::string, std::string> system_nodes_with_data;
KeeperFeatureFlags feature_flags;

View File

@ -38,8 +38,6 @@ namespace ProfileEvents
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
}
namespace fs = std::filesystem;
namespace DB
{
@ -238,13 +236,13 @@ void KeeperDispatcher::snapshotThread()
try
{
auto snapshot_path = task.create_snapshot(std::move(task.snapshot));
auto snapshot_file_info = task.create_snapshot(std::move(task.snapshot));
if (snapshot_path.empty())
if (snapshot_file_info.path.empty())
continue;
if (isLeader())
snapshot_s3.uploadSnapshot(snapshot_path);
snapshot_s3.uploadSnapshot(snapshot_file_info);
}
catch (...)
{
@ -336,7 +334,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
snapshot_s3.startup(config, macros);
keeper_context = std::make_shared<KeeperContext>();
keeper_context = std::make_shared<KeeperContext>(standalone_keeper);
keeper_context->initialize(config);
server = std::make_unique<KeeperServer>(
@ -777,35 +775,37 @@ void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms)
keeper_stats.updateLatency(process_time_ms);
}
static uint64_t getDirSize(const fs::path & dir)
static uint64_t getTotalSize(const DiskPtr & disk, const std::string & path = "")
{
checkStackSize();
if (!fs::exists(dir))
return 0;
fs::directory_iterator it(dir);
fs::directory_iterator end;
uint64_t size{0};
while (it != end)
uint64_t size = 0;
for (auto it = disk->iterateDirectory(path); it->isValid(); it->next())
{
if (it->is_regular_file())
size += fs::file_size(*it);
if (disk->isFile(it->path()))
size += disk->getFileSize(it->path());
else
size += getDirSize(it->path());
++it;
size += getTotalSize(disk, it->path());
}
return size;
}
uint64_t KeeperDispatcher::getLogDirSize() const
{
return getDirSize(configuration_and_settings->log_storage_path);
auto log_disk = keeper_context->getLogDisk();
auto size = getTotalSize(log_disk);
auto latest_log_disk = keeper_context->getLatestLogDisk();
if (log_disk != latest_log_disk)
size += getTotalSize(latest_log_disk);
return size;
}
uint64_t KeeperDispatcher::getSnapDirSize() const
{
return getDirSize(configuration_and_settings->snapshot_storage_path);
return getTotalSize(keeper_context->getSnapshotDisk());
}
Keeper4LWInfo KeeperDispatcher::getKeeper4LWInfo() const

View File

@ -205,7 +205,6 @@ public:
return keeper_context;
}
void incrementPacketsSent()
{
keeper_stats.incrementPacketsSent();

View File

@ -1,14 +1,14 @@
#include <Coordination/KeeperLogStore.h>
#include <IO/CompressionMethod.h>
#include <Disks/DiskLocal.h>
#include <Common/logger_useful.h>
namespace DB
{
KeeperLogStore::KeeperLogStore(
const std::string & changelogs_path, LogFileSettings log_file_settings)
KeeperLogStore::KeeperLogStore(LogFileSettings log_file_settings, KeeperContextPtr keeper_context)
: log(&Poco::Logger::get("KeeperLogStore"))
, changelog(changelogs_path, log, log_file_settings)
, changelog(log, log_file_settings, keeper_context)
{
if (log_file_settings.force_sync)
LOG_INFO(log, "force_sync enabled");

View File

@ -4,6 +4,7 @@
#include <mutex>
#include <Core/Types.h>
#include <Coordination/Changelog.h>
#include <Coordination/KeeperContext.h>
#include <base/defines.h>
namespace DB
@ -13,7 +14,7 @@ namespace DB
class KeeperLogStore : public nuraft::log_store
{
public:
KeeperLogStore(const std::string & changelogs_path, LogFileSettings log_file_settings);
KeeperLogStore(LogFileSettings log_file_settings, KeeperContextPtr keeper_context);
/// Read log storage from filesystem starting from last_commited_log_index
void init(uint64_t last_commited_log_index, uint64_t logs_to_keep);

View File

@ -26,6 +26,7 @@
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <Common/Stopwatch.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <Disks/DiskLocal.h>
namespace DB
{
@ -124,7 +125,6 @@ KeeperServer::KeeperServer(
state_machine = nuraft::cs_new<KeeperStateMachine>(
responses_queue_,
snapshots_queue_,
configuration_and_settings_->snapshot_storage_path,
coordination_settings,
keeper_context,
config.getBool("keeper_server.upload_snapshot_on_exit", true) ? &snapshot_manager_s3 : nullptr,
@ -134,10 +134,10 @@ KeeperServer::KeeperServer(
state_manager = nuraft::cs_new<KeeperStateManager>(
server_id,
"keeper_server",
configuration_and_settings_->log_storage_path,
configuration_and_settings_->state_file_path,
"state",
config,
coordination_settings);
coordination_settings,
keeper_context);
}
/**
@ -413,7 +413,7 @@ void KeeperServer::startup(const Poco::Util::AbstractConfiguration & config, boo
launchRaftServer(config, enable_ipv6);
keeper_context->server_state = KeeperContext::Phase::RUNNING;
keeper_context->setServerState(KeeperContext::Phase::RUNNING);
}
void KeeperServer::shutdownRaftServer()
@ -428,7 +428,7 @@ void KeeperServer::shutdownRaftServer()
raft_instance->shutdown();
keeper_context->server_state = KeeperContext::Phase::SHUTDOWN;
keeper_context->setServerState(KeeperContext::Phase::SHUTDOWN);
if (create_snapshot_on_exit)
raft_instance->create_snapshot();

View File

@ -9,13 +9,15 @@
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <Coordination/pathUtils.h>
#include <filesystem>
#include <memory>
#include <Common/logger_useful.h>
#include <Coordination/KeeperContext.h>
#include <Coordination/pathUtils.h>
#include <Coordination/KeeperConstants.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include "Core/Field.h"
#include <Disks/DiskLocal.h>
namespace DB
@ -30,6 +32,25 @@ namespace ErrorCodes
namespace
{
constexpr std::string_view tmp_prefix = "tmp_";
void moveFileBetweenDisks(DiskPtr disk_from, const std::string & path_from, DiskPtr disk_to, const std::string & path_to)
{
/// We use an empty file with the tmp_ prefix to detect incomplete copies.
/// Once a copy is complete, it doesn't matter which disk we read the file from,
/// so it's okay if a failure happens after removing the tmp file but before we remove
/// the snapshot from the source disk.
auto from_path = fs::path(path_from);
auto tmp_snapshot_name = from_path.parent_path() / (std::string{tmp_prefix} + from_path.filename().string());
{
auto buf = disk_to->writeFile(tmp_snapshot_name);
buf->finalize();
}
disk_from->copyFile(from_path, *disk_to, path_to, {});
disk_to->removeFile(tmp_snapshot_name);
disk_from->removeFile(path_from);
}
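
The tmp_ marker is the crash-safety mechanism for cross-disk moves: an empty tmp_<name> file is created on the destination before the copy and removed only after the copy finishes, and on startup any snapshot that still has a surviving marker is treated as incomplete and deleted (see clean_incomplete_file in the KeeperSnapshotManager constructor further down). A self-contained sketch of both sides of the protocol over std::filesystem, with hypothetical paths, rather than the IDisk interface:

#include <filesystem>
#include <fstream>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Writer side: create the marker, copy, drop the marker, then drop the source.
// If we crash before the marker is removed, the copy is considered incomplete.
void safeMove(const fs::path & from, const fs::path & to)
{
    const fs::path marker = to.parent_path() / ("tmp_" + to.filename().string());
    { std::ofstream{marker}; }                       // empty marker file on the destination
    fs::copy_file(from, to, fs::copy_options::overwrite_existing);
    fs::remove(marker);                              // copy is complete from this point on
    fs::remove(from);                                // losing the source afterwards is harmless
}

// Reader side (startup): a file whose tmp_ marker survived is an incomplete copy.
void cleanupIncomplete(const fs::path & dir)
{
    std::vector<fs::path> markers;
    for (const auto & entry : fs::directory_iterator(dir))
        if (entry.path().filename().string().starts_with("tmp_"))
            markers.push_back(entry.path());

    for (const auto & marker : markers)
    {
        fs::remove(dir / marker.filename().string().substr(4));  // the half-copied file, if any
        fs::remove(marker);                                      // and the marker itself
    }
}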
uint64_t getSnapshotPathUpToLogIdx(const String & snapshot_path)
{
std::filesystem::path path(snapshot_path);
@ -41,7 +62,7 @@ namespace
std::string getSnapshotFileName(uint64_t up_to_log_idx, bool compress_zstd)
{
auto base = std::string{"snapshot_"} + std::to_string(up_to_log_idx) + ".bin";
auto base = fmt::format("snapshot_{}.bin", up_to_log_idx);
if (compress_zstd)
base += ".zstd";
return base;
@ -156,7 +177,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
if (snapshot.version >= SnapshotVersion::V5)
{
writeBinary(snapshot.zxid, out);
if (keeper_context->digest_enabled)
if (keeper_context->digestEnabled())
{
writeBinary(static_cast<uint8_t>(KeeperStorage::CURRENT_DIGEST_VERSION), out);
writeBinary(snapshot.nodes_digest, out);
@ -185,7 +206,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
}
/// Serialize data tree
writeBinary(snapshot.snapshot_container_size - keeper_context->system_nodes_with_data.size(), out);
writeBinary(snapshot.snapshot_container_size - keeper_context->getSystemNodesWithData().size(), out);
size_t counter = 0;
for (auto it = snapshot.begin; counter < snapshot.snapshot_container_size; ++counter)
{
@ -267,7 +288,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
deserialization_result.snapshot_meta = deserializeSnapshotMetadata(in);
KeeperStorage & storage = *deserialization_result.storage;
bool recalculate_digest = keeper_context->digest_enabled;
bool recalculate_digest = keeper_context->digestEnabled();
if (version >= SnapshotVersion::V5)
{
readBinary(storage.zxid, in);
@ -349,7 +370,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
const std::string error_msg = fmt::format("Cannot read node on path {} from a snapshot because it is used as a system node", path);
if (match_result == IS_CHILD)
{
if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT)
if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
{
LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg);
continue;
@ -365,7 +386,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
{
if (!is_node_empty(node))
{
if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT)
if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
{
LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg);
node = KeeperStorage::Node{};
@ -394,9 +415,9 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
{
if (itr.key != "/")
{
auto parent_path = parentPath(itr.key);
auto parent_path = parentNodePath(itr.key);
storage.container.updateValue(
parent_path, [version, path = itr.key](KeeperStorage::Node & value) { value.addChild(getBaseName(path), /*update_size*/ version < SnapshotVersion::V4); });
parent_path, [version, path = itr.key](KeeperStorage::Node & value) { value.addChild(getBaseNodeName(path), /*update_size*/ version < SnapshotVersion::V4); });
}
}
@ -507,70 +528,110 @@ KeeperStorageSnapshot::~KeeperStorageSnapshot()
}
KeeperSnapshotManager::KeeperSnapshotManager(
const std::string & snapshots_path_,
size_t snapshots_to_keep_,
const KeeperContextPtr & keeper_context_,
bool compress_snapshots_zstd_,
const std::string & superdigest_,
size_t storage_tick_time_)
: snapshots_path(snapshots_path_)
, snapshots_to_keep(snapshots_to_keep_)
: snapshots_to_keep(snapshots_to_keep_)
, compress_snapshots_zstd(compress_snapshots_zstd_)
, superdigest(superdigest_)
, storage_tick_time(storage_tick_time_)
, keeper_context(keeper_context_)
{
namespace fs = std::filesystem;
if (!fs::exists(snapshots_path))
fs::create_directories(snapshots_path);
for (const auto & p : fs::directory_iterator(snapshots_path))
const auto load_snapshot_from_disk = [&](const auto & disk)
{
const auto & path = p.path();
LOG_TRACE(log, "Reading from disk {}", disk->getName());
std::unordered_map<std::string, std::string> incomplete_files;
if (!path.has_filename())
continue;
if (startsWith(path.filename(), "tmp_")) /// Unfinished tmp files
const auto clean_incomplete_file = [&](const auto & file_path)
{
std::filesystem::remove(p);
if (auto incomplete_it = incomplete_files.find(fs::path(file_path).filename()); incomplete_it != incomplete_files.end())
{
LOG_TRACE(log, "Removing {} from {}", file_path, disk->getName());
disk->removeFile(file_path);
disk->removeFile(incomplete_it->second);
incomplete_files.erase(incomplete_it);
return true;
}
return false;
};
std::vector<std::string> snapshot_files;
for (auto it = disk->iterateDirectory(""); it->isValid(); it->next())
{
if (it->name().starts_with(tmp_prefix))
{
incomplete_files.emplace(it->name().substr(tmp_prefix.size()), it->path());
continue;
}
/// Not snapshot file
if (!startsWith(path.filename(), "snapshot_"))
{
continue;
if (it->name().starts_with("snapshot_") && !clean_incomplete_file(it->path()))
snapshot_files.push_back(it->path());
}
size_t snapshot_up_to = getSnapshotPathUpToLogIdx(p.path());
existing_snapshots[snapshot_up_to] = p.path();
for (const auto & snapshot_file : snapshot_files)
{
if (clean_incomplete_file(fs::path(snapshot_file).filename()))
continue;
LOG_TRACE(log, "Found {} on {}", snapshot_file, disk->getName());
size_t snapshot_up_to = getSnapshotPathUpToLogIdx(snapshot_file);
auto [_, inserted] = existing_snapshots.insert_or_assign(snapshot_up_to, SnapshotFileInfo{snapshot_file, disk});
if (!inserted)
LOG_WARNING(
&Poco::Logger::get("KeeperSnapshotManager"),
"Found another snapshots with last log idx {}, will use snapshot from disk {}",
snapshot_up_to,
disk->getName());
}
for (const auto & [name, path] : incomplete_files)
disk->removeFile(path);
};
for (const auto & disk : keeper_context->getOldSnapshotDisks())
load_snapshot_from_disk(disk);
auto disk = getDisk();
load_snapshot_from_disk(disk);
auto latest_snapshot_disk = getLatestSnapshotDisk();
if (latest_snapshot_disk != disk)
load_snapshot_from_disk(latest_snapshot_disk);
removeOutdatedSnapshotsIfNeeded();
moveSnapshotsIfNeeded();
}
std::string KeeperSnapshotManager::serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx)
SnapshotFileInfo KeeperSnapshotManager::serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx)
{
ReadBufferFromNuraftBuffer reader(buffer);
auto snapshot_file_name = getSnapshotFileName(up_to_log_idx, compress_snapshots_zstd);
auto tmp_snapshot_file_name = "tmp_" + snapshot_file_name;
std::string tmp_snapshot_path = std::filesystem::path{snapshots_path} / tmp_snapshot_file_name;
std::string new_snapshot_path = std::filesystem::path{snapshots_path} / snapshot_file_name;
WriteBufferFromFile plain_buf(tmp_snapshot_path);
copyData(reader, plain_buf);
plain_buf.sync();
auto disk = getLatestSnapshotDisk();
std::filesystem::rename(tmp_snapshot_path, new_snapshot_path);
{
auto buf = disk->writeFile(tmp_snapshot_file_name);
buf->finalize();
}
existing_snapshots.emplace(up_to_log_idx, new_snapshot_path);
auto plain_buf = disk->writeFile(snapshot_file_name);
copyData(reader, *plain_buf);
plain_buf->sync();
plain_buf->finalize();
disk->removeFile(tmp_snapshot_file_name);
existing_snapshots.emplace(up_to_log_idx, SnapshotFileInfo{snapshot_file_name, disk});
removeOutdatedSnapshotsIfNeeded();
moveSnapshotsIfNeeded();
return new_snapshot_path;
return {snapshot_file_name, disk};
}
nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeLatestSnapshotBufferFromDisk()
@ -584,7 +645,8 @@ nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeLatestSnapshotBuff
}
catch (const DB::Exception &)
{
std::filesystem::remove(latest_itr->second);
const auto & [path, disk] = latest_itr->second;
disk->removeFile(path);
existing_snapshots.erase(latest_itr->first);
tryLogCurrentException(__PRETTY_FUNCTION__);
}
@ -595,10 +657,10 @@ nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeLatestSnapshotBuff
nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeSnapshotBufferFromDisk(uint64_t up_to_log_idx) const
{
const std::string & snapshot_path = existing_snapshots.at(up_to_log_idx);
const auto & [snapshot_path, snapshot_disk] = existing_snapshots.at(up_to_log_idx);
WriteBufferFromNuraftBuffer writer;
ReadBufferFromFile reader(snapshot_path);
copyData(reader, writer);
auto reader = snapshot_disk->readFile(snapshot_path);
copyData(*reader, writer);
return writer.getBuffer();
}
@ -659,30 +721,75 @@ SnapshotDeserializationResult KeeperSnapshotManager::restoreFromLatestSnapshot()
return deserializeSnapshotFromBuffer(buffer);
}
DiskPtr KeeperSnapshotManager::getDisk() const
{
return keeper_context->getSnapshotDisk();
}
DiskPtr KeeperSnapshotManager::getLatestSnapshotDisk() const
{
return keeper_context->getLatestSnapshotDisk();
}
void KeeperSnapshotManager::removeOutdatedSnapshotsIfNeeded()
{
while (existing_snapshots.size() > snapshots_to_keep)
removeSnapshot(existing_snapshots.begin()->first);
}
void KeeperSnapshotManager::moveSnapshotsIfNeeded()
{
/// move snapshots to correct disks
auto disk = getDisk();
auto latest_snapshot_disk = getLatestSnapshotDisk();
auto latest_snapshot_idx = getLatestSnapshotIndex();
for (auto & [idx, file_info] : existing_snapshots)
{
if (idx == latest_snapshot_idx)
{
if (file_info.disk != latest_snapshot_disk)
{
moveFileBetweenDisks(file_info.disk, file_info.path, latest_snapshot_disk, file_info.path);
file_info.disk = latest_snapshot_disk;
}
}
else
{
if (file_info.disk != disk)
{
moveFileBetweenDisks(file_info.disk, file_info.path, disk, file_info.path);
file_info.disk = disk;
}
}
}
}
void KeeperSnapshotManager::removeSnapshot(uint64_t log_idx)
{
auto itr = existing_snapshots.find(log_idx);
if (itr == existing_snapshots.end())
throw Exception(ErrorCodes::UNKNOWN_SNAPSHOT, "Unknown snapshot with log index {}", log_idx);
std::filesystem::remove(itr->second);
const auto & [path, disk] = itr->second;
disk->removeFile(path);
existing_snapshots.erase(itr);
}
std::pair<std::string, std::error_code> KeeperSnapshotManager::serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot)
SnapshotFileInfo KeeperSnapshotManager::serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot)
{
auto up_to_log_idx = snapshot.snapshot_meta->get_last_log_idx();
auto snapshot_file_name = getSnapshotFileName(up_to_log_idx, compress_snapshots_zstd);
auto tmp_snapshot_file_name = "tmp_" + snapshot_file_name;
std::string tmp_snapshot_path = std::filesystem::path{snapshots_path} / tmp_snapshot_file_name;
std::string new_snapshot_path = std::filesystem::path{snapshots_path} / snapshot_file_name;
auto writer = std::make_unique<WriteBufferFromFile>(tmp_snapshot_path, O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_APPEND);
auto disk = getLatestSnapshotDisk();
{
auto buf = disk->writeFile(tmp_snapshot_file_name);
buf->finalize();
}
auto writer = disk->writeFile(snapshot_file_name);
std::unique_ptr<WriteBuffer> compressed_writer;
if (compress_snapshots_zstd)
compressed_writer = wrapWriteBufferWithCompressionMethod(std::move(writer), CompressionMethod::Zstd, 3);
@ -693,14 +800,13 @@ std::pair<std::string, std::error_code> KeeperSnapshotManager::serializeSnapshot
compressed_writer->finalize();
compressed_writer->sync();
std::error_code ec;
std::filesystem::rename(tmp_snapshot_path, new_snapshot_path, ec);
if (!ec)
{
existing_snapshots.emplace(up_to_log_idx, new_snapshot_path);
disk->removeFile(tmp_snapshot_file_name);
existing_snapshots.emplace(up_to_log_idx, SnapshotFileInfo{snapshot_file_name, disk});
removeOutdatedSnapshotsIfNeeded();
}
return {new_snapshot_path, ec};
moveSnapshotsIfNeeded();
return {snapshot_file_name, disk};
}
}

View File

@ -6,6 +6,7 @@
#include <IO/WriteBuffer.h>
#include <libnuraft/nuraft.hxx>
#include <Coordination/KeeperContext.h>
#include <Disks/IDisk.h>
namespace DB
{
@ -86,8 +87,14 @@ public:
uint64_t nodes_digest;
};
struct SnapshotFileInfo
{
std::string path;
DiskPtr disk;
};
using KeeperStorageSnapshotPtr = std::shared_ptr<KeeperStorageSnapshot>;
using CreateSnapshotCallback = std::function<std::string(KeeperStorageSnapshotPtr &&)>;
using CreateSnapshotCallback = std::function<SnapshotFileInfo(KeeperStorageSnapshotPtr &&)>;
using SnapshotMetaAndStorage = std::pair<SnapshotMetadataPtr, KeeperStoragePtr>;
@ -98,7 +105,6 @@ class KeeperSnapshotManager
{
public:
KeeperSnapshotManager(
const std::string & snapshots_path_,
size_t snapshots_to_keep_,
const KeeperContextPtr & keeper_context_,
bool compress_snapshots_zstd_ = true,
@ -112,10 +118,10 @@ public:
nuraft::ptr<nuraft::buffer> serializeSnapshotToBuffer(const KeeperStorageSnapshot & snapshot) const;
/// Serialize an already compressed snapshot to disk (returns the file path and disk)
std::string serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx);
SnapshotFileInfo serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx);
/// Serialize snapshot directly to disk
std::pair<std::string, std::error_code> serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot);
SnapshotFileInfo serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot);
SnapshotDeserializationResult deserializeSnapshotFromBuffer(nuraft::ptr<nuraft::buffer> buffer) const;
@ -139,30 +145,39 @@ public:
return 0;
}
std::string getLatestSnapshotPath() const
SnapshotFileInfo getLatestSnapshotInfo() const
{
if (!existing_snapshots.empty())
{
const auto & path = existing_snapshots.at(getLatestSnapshotIndex());
std::error_code ec;
if (std::filesystem::exists(path, ec))
return path;
const auto & [path, disk] = existing_snapshots.at(getLatestSnapshotIndex());
try
{
if (disk->exists(path))
return {path, disk};
}
return "";
catch (...)
{
}
}
return {"", nullptr};
}
private:
void removeOutdatedSnapshotsIfNeeded();
void moveSnapshotsIfNeeded();
DiskPtr getDisk() const;
DiskPtr getLatestSnapshotDisk() const;
/// Checks the first 4 buffer bytes to make sure that the snapshot is compressed with the
/// ZSTD codec.
static bool isZstdCompressed(nuraft::ptr<nuraft::buffer> buffer);
const std::string snapshots_path;
/// How many snapshots to keep before removal
const size_t snapshots_to_keep;
/// All existing snapshots (log_index -> snapshot file info)
std::map<uint64_t, std::string> existing_snapshots;
std::map<uint64_t, SnapshotFileInfo> existing_snapshots;
/// Compress snapshots in common ZSTD format instead of custom ClickHouse block LZ4 format
const bool compress_snapshots_zstd;
/// Superdigest for deserialization of storage
@ -171,6 +186,8 @@ private:
size_t storage_tick_time;
KeeperContextPtr keeper_context;
Poco::Logger * log = &Poco::Logger::get("KeeperSnapshotManager");
};
/// Keeper create snapshots in background thread. KeeperStateMachine just create

View File

@ -132,8 +132,9 @@ std::shared_ptr<KeeperSnapshotManagerS3::S3Configuration> KeeperSnapshotManagerS
return snapshot_s3_client;
}
void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_path)
void KeeperSnapshotManagerS3::uploadSnapshotImpl(const SnapshotFileInfo & snapshot_file_info)
{
const auto & [snapshot_path, snapshot_disk] = snapshot_file_info;
try
{
auto s3_client = getSnapshotS3Client();
@ -154,8 +155,9 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_pa
};
};
LOG_INFO(log, "Will try to upload snapshot on {} to S3", snapshot_path);
ReadBufferFromFile snapshot_file(snapshot_path);
LOG_INFO(log, "Will try to upload snapshot on {} to S3", snapshot_file_info.path);
auto snapshot_file = snapshot_disk->readFile(snapshot_file_info.path);
auto snapshot_name = fs::path(snapshot_path).filename().string();
auto lock_file = fmt::format(".{}_LOCK", snapshot_name);
@ -222,7 +224,7 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_pa
});
WriteBufferFromS3 snapshot_writer = create_writer(snapshot_name);
copyData(snapshot_file, snapshot_writer);
copyData(*snapshot_file, snapshot_writer);
snapshot_writer.finalize();
LOG_INFO(log, "Successfully uploaded {} to S3", snapshot_path);
@ -240,31 +242,31 @@ void KeeperSnapshotManagerS3::snapshotS3Thread()
while (!shutdown_called)
{
std::string snapshot_path;
if (!snapshots_s3_queue.pop(snapshot_path))
SnapshotFileInfo snapshot_file_info;
if (!snapshots_s3_queue.pop(snapshot_file_info))
break;
if (shutdown_called)
break;
uploadSnapshotImpl(snapshot_path);
uploadSnapshotImpl(snapshot_file_info);
}
}
void KeeperSnapshotManagerS3::uploadSnapshot(const std::string & path, bool async_upload)
void KeeperSnapshotManagerS3::uploadSnapshot(const SnapshotFileInfo & file_info, bool async_upload)
{
if (getSnapshotS3Client() == nullptr)
return;
if (async_upload)
{
if (!snapshots_s3_queue.push(path))
LOG_WARNING(log, "Failed to add snapshot {} to S3 queue", path);
if (!snapshots_s3_queue.push(file_info))
LOG_WARNING(log, "Failed to add snapshot {} to S3 queue", file_info.path);
return;
}
uploadSnapshotImpl(path);
uploadSnapshotImpl(file_info);
}
void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)

View File

@ -6,10 +6,13 @@
#include <Common/MultiVersion.h>
#include <Common/Macros.h>
#include <Coordination/KeeperSnapshotManager.h>
#if USE_AWS_S3
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ThreadPool.h>
#include <string>
#endif
@ -24,13 +27,13 @@ public:
/// 'macros' are used to substitute macros in endpoint of disks
void updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
void uploadSnapshot(const std::string & path, bool async_upload = true);
void uploadSnapshot(const SnapshotFileInfo & file_info, bool async_upload = true);
/// 'macros' are used to substitute macros in endpoint of disks
void startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
void shutdown();
private:
using SnapshotS3Queue = ConcurrentBoundedQueue<std::string>;
using SnapshotS3Queue = ConcurrentBoundedQueue<SnapshotFileInfo>;
SnapshotS3Queue snapshots_s3_queue;
/// Upload new snapshots to S3
@ -48,7 +51,7 @@ private:
std::shared_ptr<S3Configuration> getSnapshotS3Client() const;
void uploadSnapshotImpl(const std::string & snapshot_path);
void uploadSnapshotImpl(const SnapshotFileInfo & snapshot_file_info);
/// Thread upload snapshots to S3 in the background
void snapshotS3Thread();
@ -60,7 +63,7 @@ public:
KeeperSnapshotManagerS3() = default;
void updateS3Configuration(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
void uploadSnapshot(const std::string &, [[maybe_unused]] bool async_upload = true) {}
void uploadSnapshot(const SnapshotFileInfo &, [[maybe_unused]] bool async_upload = true) {}
void startup(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}

View File

@ -14,6 +14,8 @@
#include <Common/logger_useful.h>
#include "Coordination/KeeperStorage.h"
#include <Disks/DiskLocal.h>
namespace ProfileEvents
{
@ -33,17 +35,11 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int SYSTEM_ERROR;
}
namespace
{
}
KeeperStateMachine::KeeperStateMachine(
ResponsesQueue & responses_queue_,
SnapshotsQueue & snapshots_queue_,
const std::string & snapshots_path_,
const CoordinationSettingsPtr & coordination_settings_,
const KeeperContextPtr & keeper_context_,
KeeperSnapshotManagerS3 * snapshot_manager_s3_,
@ -52,7 +48,6 @@ KeeperStateMachine::KeeperStateMachine(
: commit_callback(commit_callback_)
, coordination_settings(coordination_settings_)
, snapshot_manager(
snapshots_path_,
coordination_settings->snapshots_to_keep,
keeper_context_,
coordination_settings->compress_snapshots_with_zstd_format,
@ -69,6 +64,16 @@ KeeperStateMachine::KeeperStateMachine(
{
}
namespace
{
bool isLocalDisk(const IDisk & disk)
{
return dynamic_cast<const DiskLocal *>(&disk) != nullptr;
}
}
void KeeperStateMachine::init()
{
/// Do everything without mutexes, no other threads exist.
@ -83,9 +88,13 @@ void KeeperStateMachine::init()
try
{
auto snapshot_deserialization_result
= snapshot_manager.deserializeSnapshotFromBuffer(snapshot_manager.deserializeSnapshotBufferFromDisk(latest_log_index));
latest_snapshot_path = snapshot_manager.getLatestSnapshotPath();
latest_snapshot_buf = snapshot_manager.deserializeSnapshotBufferFromDisk(latest_log_index);
auto snapshot_deserialization_result = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_buf);
latest_snapshot_info = snapshot_manager.getLatestSnapshotInfo();
if (isLocalDisk(*latest_snapshot_info.disk))
latest_snapshot_buf = nullptr;
storage = std::move(snapshot_deserialization_result.storage);
latest_snapshot_meta = snapshot_deserialization_result.snapshot_meta;
cluster_config = snapshot_deserialization_result.cluster_config;
@ -276,7 +285,7 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
std::abort();
}
if (keeper_context->digest_enabled && request_for_session.digest)
if (keeper_context->digestEnabled() && request_for_session.digest)
assertDigest(*request_for_session.digest, storage->getNodesDigest(false), *request_for_session.request, false);
return true;
@ -333,7 +342,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
response_for_session.session_id);
}
if (keeper_context->digest_enabled && request_for_session->digest)
if (keeper_context->digestEnabled() && request_for_session->digest)
assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, true);
}
@ -371,7 +380,12 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
{ /// deserialize and apply snapshot to storage
std::lock_guard lock(storage_and_responses_lock);
auto snapshot_deserialization_result
SnapshotDeserializationResult snapshot_deserialization_result;
if (latest_snapshot_ptr)
snapshot_deserialization_result = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_ptr);
else
snapshot_deserialization_result
= snapshot_manager.deserializeSnapshotFromBuffer(snapshot_manager.deserializeSnapshotBufferFromDisk(s.get_last_log_idx()));
/// maybe some logs were preprocessed with log idx larger than the snapshot idx
@ -464,19 +478,24 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
}
else
{
auto [path, error_code] = snapshot_manager.serializeSnapshotToDisk(*snapshot);
if (error_code)
{
throw Exception(
ErrorCodes::SYSTEM_ERROR,
"Snapshot {} was created failed, error: {}",
snapshot->snapshot_meta->get_last_log_idx(),
error_code.message());
}
latest_snapshot_path = path;
latest_snapshot_meta = snapshot->snapshot_meta;
/// we rely on the fact that the snapshot disk cannot be changed during runtime
if (isLocalDisk(*keeper_context->getLatestSnapshotDisk()))
{
auto snapshot_info = snapshot_manager.serializeSnapshotToDisk(*snapshot);
latest_snapshot_info = std::move(snapshot_info);
latest_snapshot_buf = nullptr;
}
else
{
auto snapshot_buf = snapshot_manager.serializeSnapshotToBuffer(*snapshot);
auto snapshot_info = snapshot_manager.serializeSnapshotBufferToDisk(*snapshot_buf, snapshot->snapshot_meta->get_last_log_idx());
latest_snapshot_info = std::move(snapshot_info);
latest_snapshot_buf = std::move(snapshot_buf);
}
ProfileEvents::increment(ProfileEvents::KeeperSnapshotCreations);
LOG_DEBUG(log, "Created persistent snapshot {} with path {}", latest_snapshot_meta->get_last_log_idx(), path);
LOG_DEBUG(log, "Created persistent snapshot {} with path {}", latest_snapshot_meta->get_last_log_idx(), latest_snapshot_info.path);
}
}
@ -500,19 +519,19 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
when_done(ret, exception);
return ret ? latest_snapshot_path : "";
return ret ? latest_snapshot_info : SnapshotFileInfo{};
};
if (keeper_context->server_state == KeeperContext::Phase::SHUTDOWN)
if (keeper_context->getServerState() == KeeperContext::Phase::SHUTDOWN)
{
LOG_INFO(log, "Creating a snapshot during shutdown because 'create_snapshot_on_exit' is enabled.");
auto snapshot_path = snapshot_task.create_snapshot(std::move(snapshot_task.snapshot));
auto snapshot_file_info = snapshot_task.create_snapshot(std::move(snapshot_task.snapshot));
if (!snapshot_path.empty() && snapshot_manager_s3)
if (!snapshot_file_info.path.empty() && snapshot_manager_s3)
{
LOG_INFO(log, "Uploading snapshot {} during shutdown because 'upload_snapshot_on_exit' is enabled.", snapshot_path);
snapshot_manager_s3->uploadSnapshot(snapshot_path, /* asnyc_upload */ false);
LOG_INFO(log, "Uploading snapshot {} during shutdown because 'upload_snapshot_on_exit' is enabled.", snapshot_file_info.path);
snapshot_manager_s3->uploadSnapshot(snapshot_file_info, /* async_upload */ false);
}
return;
@ -533,14 +552,20 @@ void KeeperStateMachine::save_logical_snp_obj(
nuraft::ptr<nuraft::buffer> snp_buf = s.serialize();
nuraft::ptr<nuraft::snapshot> cloned_meta = nuraft::snapshot::deserialize(*snp_buf);
nuraft::ptr<nuraft::buffer> cloned_buffer;
/// we rely on the fact that the snapshot disk cannot be changed during runtime
if (!isLocalDisk(*keeper_context->getSnapshotDisk()))
cloned_buffer = nuraft::buffer::clone(data);
try
{
std::lock_guard lock(snapshots_lock);
/// Serialize snapshot to disk
auto result_path = snapshot_manager.serializeSnapshotBufferToDisk(data, s.get_last_log_idx());
latest_snapshot_path = result_path;
latest_snapshot_info = snapshot_manager.serializeSnapshotBufferToDisk(data, s.get_last_log_idx());
latest_snapshot_meta = cloned_meta;
LOG_DEBUG(log, "Saved snapshot {} to path {}", s.get_last_log_idx(), result_path);
latest_snapshot_buf = std::move(cloned_buffer);
LOG_DEBUG(log, "Saved snapshot {} to path {}", s.get_last_log_idx(), latest_snapshot_info.path);
obj_id++;
ProfileEvents::increment(ProfileEvents::KeeperSaveSnapshot);
}
@ -600,11 +625,23 @@ int KeeperStateMachine::read_logical_snp_obj(
latest_snapshot_meta->get_last_log_idx());
return -1;
}
if (bufferFromFile(log, latest_snapshot_path, data_out))
const auto & [path, disk] = latest_snapshot_info;
if (isLocalDisk(*disk))
{
LOG_WARNING(log, "Error reading snapshot {} from {}", s.get_last_log_idx(), latest_snapshot_path);
auto full_path = fs::path(disk->getPath()) / path;
if (bufferFromFile(log, full_path, data_out))
{
LOG_WARNING(log, "Error reading snapshot {} from {}", s.get_last_log_idx(), full_path);
return -1;
}
}
else
{
chassert(latest_snapshot_buf);
data_out = nuraft::buffer::clone(*latest_snapshot_buf);
}
is_last_obj = true;
ProfileEvents::increment(ProfileEvents::KeeperReadSnapshot);
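
The split above relies on the snapshot disk not changing at runtime: when the latest snapshot sits on a local disk, read_logical_snp_obj streams it straight from the file and the in-memory buffer is dropped; for a non-local disk (e.g. remote object storage), a cloned buffer is kept so serving the snapshot never has to read the remote disk on this path. A small sketch of that decision with hypothetical types:

#include <memory>
#include <string>
#include <vector>

using Buffer = std::vector<char>;

// Hypothetical snapshot record: the on-disk location plus an optional cached buffer
// that is only kept when the disk is not local.
struct LatestSnapshot
{
    std::string path;
    bool disk_is_local = true;
    std::shared_ptr<Buffer> cached;   // nullptr when disk_is_local
};

template <typename ReadLocalFileFn>
std::shared_ptr<Buffer> readSnapshotObject(const LatestSnapshot & snapshot, ReadLocalFileFn && read_local_file)
{
    if (snapshot.disk_is_local)
        return read_local_file(snapshot.path);   // cheap: read the local file on demand
    return snapshot.cached;                      // remote disk: serve the buffer cloned at creation time
}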

View File

@ -26,7 +26,6 @@ public:
KeeperStateMachine(
ResponsesQueue & responses_queue_,
SnapshotsQueue & snapshots_queue_,
const std::string & snapshots_path_,
const CoordinationSettingsPtr & coordination_settings_,
const KeeperContextPtr & keeper_context_,
KeeperSnapshotManagerS3 * snapshot_manager_s3_,
@ -128,7 +127,7 @@ private:
/// In our state machine we always have a single snapshot which is stored
/// in memory in compressed (serialized) format.
SnapshotMetadataPtr latest_snapshot_meta = nullptr;
std::string latest_snapshot_path;
SnapshotFileInfo latest_snapshot_info;
nuraft::ptr<nuraft::buffer> latest_snapshot_buf = nullptr;
CoordinationSettingsPtr coordination_settings;

View File

@ -8,6 +8,7 @@
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromFile.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <Disks/DiskLocal.h>
#include <Common/logger_useful.h>
namespace DB
@ -22,6 +23,8 @@ namespace ErrorCodes
namespace
{
const std::string copy_lock_file = "STATE_COPY_LOCK";
bool isLocalhost(const std::string & hostname)
{
try
@ -212,12 +215,14 @@ KeeperStateManager::parseServersConfiguration(const Poco::Util::AbstractConfigur
return result;
}
KeeperStateManager::KeeperStateManager(
int server_id_, const std::string & host, int port, const std::string & logs_path, const std::string & state_file_path)
KeeperStateManager::KeeperStateManager(int server_id_, const std::string & host, int port, KeeperContextPtr keeper_context_)
: my_server_id(server_id_)
, secure(false)
, log_store(nuraft::cs_new<KeeperLogStore>(logs_path, LogFileSettings{.force_sync =false, .compress_logs = false, .rotate_interval = 5000}))
, server_state_path(state_file_path)
, log_store(nuraft::cs_new<KeeperLogStore>(
LogFileSettings{.force_sync = false, .compress_logs = false, .rotate_interval = 5000},
keeper_context_))
, server_state_file_name("state")
, keeper_context(keeper_context_)
, logger(&Poco::Logger::get("KeeperStateManager"))
{
auto peer_config = nuraft::cs_new<nuraft::srv_config>(my_server_id, host + ":" + std::to_string(port));
@ -230,16 +235,15 @@ KeeperStateManager::KeeperStateManager(
KeeperStateManager::KeeperStateManager(
int my_server_id_,
const std::string & config_prefix_,
const std::string & log_storage_path,
const std::string & state_file_path,
const std::string & server_state_file_name_,
const Poco::Util::AbstractConfiguration & config,
const CoordinationSettingsPtr & coordination_settings)
const CoordinationSettingsPtr & coordination_settings,
KeeperContextPtr keeper_context_)
: my_server_id(my_server_id_)
, secure(config.getBool(config_prefix_ + ".raft_configuration.secure", false))
, config_prefix(config_prefix_)
, configuration_wrapper(parseServersConfiguration(config, false))
, log_store(nuraft::cs_new<KeeperLogStore>(
log_storage_path,
LogFileSettings
{
.force_sync = coordination_settings->force_sync,
@ -247,8 +251,10 @@ KeeperStateManager::KeeperStateManager(
.rotate_interval = coordination_settings->rotate_log_storage_interval,
.max_size = coordination_settings->max_log_file_size,
.overallocate_size = coordination_settings->log_file_overallocate_size
}))
, server_state_path(state_file_path)
},
keeper_context_))
, server_state_file_name(server_state_file_name_)
, keeper_context(keeper_context_)
, logger(&Poco::Logger::get("KeeperStateManager"))
{
}
@ -287,16 +293,21 @@ void KeeperStateManager::save_config(const nuraft::cluster_config & config)
configuration_wrapper.cluster_config = nuraft::cluster_config::deserialize(*buf);
}
const std::filesystem::path & KeeperStateManager::getOldServerStatePath()
const String & KeeperStateManager::getOldServerStatePath()
{
static auto old_path = [this]
{
return server_state_path.parent_path() / (server_state_path.filename().generic_string() + "-OLD");
return server_state_file_name + "-OLD";
}();
return old_path;
}
DiskPtr KeeperStateManager::getStateFileDisk() const
{
return keeper_context->getStateFileDisk();
}
namespace
{
enum ServerStateVersion : uint8_t
@ -312,51 +323,61 @@ void KeeperStateManager::save_state(const nuraft::srv_state & state)
{
const auto & old_path = getOldServerStatePath();
if (std::filesystem::exists(server_state_path))
std::filesystem::rename(server_state_path, old_path);
auto disk = getStateFileDisk();
WriteBufferFromFile server_state_file(server_state_path, DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY);
if (disk->exists(server_state_file_name))
{
auto buf = disk->writeFile(copy_lock_file);
buf->finalize();
disk->copyFile(server_state_file_name, *disk, old_path);
disk->removeFile(copy_lock_file);
disk->removeFile(old_path);
}
auto server_state_file = disk->writeFile(server_state_file_name);
auto buf = state.serialize();
// calculate checksum
SipHash hash;
hash.update(current_server_state_version);
hash.update(reinterpret_cast<const char *>(buf->data_begin()), buf->size());
writeIntBinary(hash.get64(), server_state_file);
writeIntBinary(hash.get64(), *server_state_file);
writeIntBinary(static_cast<uint8_t>(current_server_state_version), server_state_file);
writeIntBinary(static_cast<uint8_t>(current_server_state_version), *server_state_file);
server_state_file.write(reinterpret_cast<const char *>(buf->data_begin()), buf->size());
server_state_file.sync();
server_state_file.close();
server_state_file->write(reinterpret_cast<const char *>(buf->data_begin()), buf->size());
server_state_file->sync();
server_state_file->finalize();
std::filesystem::remove(old_path);
disk->removeFileIfExists(old_path);
}
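
save_state now goes through the disk interface and uses a lock-file dance instead of a filesystem rename: STATE_COPY_LOCK is created before the current state is copied to the -OLD backup and removed right after, so read_state (below) can tell a complete backup from an interrupted one. A self-contained std::filesystem sketch of the intended protocol, with hypothetical file names:

#include <filesystem>
#include <fstream>
#include <string>

namespace fs = std::filesystem;

// Back up the current state file before rewriting it. If the process dies while the
// backup is being copied, the surviving lock file marks the backup as unusable.
void backupAndRewriteState(const fs::path & dir, const std::string & state_name, const std::string & new_contents)
{
    const fs::path state = dir / state_name;
    const fs::path old_state = dir / (state_name + "-OLD");
    const fs::path copy_lock = dir / "STATE_COPY_LOCK";

    if (fs::exists(state))
    {
        { std::ofstream{copy_lock}; }                                   // backup in progress
        fs::copy_file(state, old_state, fs::copy_options::overwrite_existing);
        fs::remove(copy_lock);                                          // backup is now complete
    }

    std::ofstream out(state, std::ios::trunc | std::ios::binary);      // rewrite the state file
    out.write(new_contents.data(), static_cast<std::streamsize>(new_contents.size()));
    out.flush();

    fs::remove(old_state);                                              // drop the backup on success
}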
nuraft::ptr<nuraft::srv_state> KeeperStateManager::read_state()
{
const auto & old_path = getOldServerStatePath();
const auto try_read_file = [this](const auto & path) -> nuraft::ptr<nuraft::srv_state>
auto disk = getStateFileDisk();
const auto try_read_file = [&](const auto & path) -> nuraft::ptr<nuraft::srv_state>
{
try
{
ReadBufferFromFile read_buf(path);
auto content_size = read_buf.getFileSize();
auto read_buf = disk->readFile(path);
auto content_size = read_buf->getFileSize();
if (content_size == 0)
return nullptr;
uint64_t read_checksum{0};
readIntBinary(read_checksum, read_buf);
readIntBinary(read_checksum, *read_buf);
uint8_t version;
readIntBinary(version, read_buf);
readIntBinary(version, *read_buf);
auto buffer_size = content_size - sizeof read_checksum - sizeof version;
auto state_buf = nuraft::buffer::alloc(buffer_size);
read_buf.readStrict(reinterpret_cast<char *>(state_buf->data_begin()), buffer_size);
read_buf->readStrict(reinterpret_cast<char *>(state_buf->data_begin()), buffer_size);
SipHash hash;
hash.update(version);
@ -366,15 +387,15 @@ nuraft::ptr<nuraft::srv_state> KeeperStateManager::read_state()
{
constexpr auto error_format = "Invalid checksum while reading state from {}. Got {}, expected {}";
#ifdef NDEBUG
LOG_ERROR(logger, error_format, path.generic_string(), hash.get64(), read_checksum);
LOG_ERROR(logger, error_format, path, hash.get64(), read_checksum);
return nullptr;
#else
throw Exception(ErrorCodes::CORRUPTED_DATA, error_format, path.generic_string(), hash.get64(), read_checksum);
throw Exception(ErrorCodes::CORRUPTED_DATA, error_format, disk->getPath() + path, hash.get64(), read_checksum);
#endif
}
auto state = nuraft::srv_state::deserialize(*state_buf);
LOG_INFO(logger, "Read state from {}", path.generic_string());
LOG_INFO(logger, "Read state from {}", fs::path(disk->getPath()) / path);
return state;
}
catch (const std::exception & e)
@ -385,37 +406,45 @@ nuraft::ptr<nuraft::srv_state> KeeperStateManager::read_state()
throw;
}
LOG_ERROR(logger, "Failed to deserialize state from {}", path.generic_string());
LOG_ERROR(logger, "Failed to deserialize state from {}", disk->getPath() + path);
return nullptr;
}
};
if (std::filesystem::exists(server_state_path))
if (disk->exists(server_state_file_name))
{
auto state = try_read_file(server_state_path);
auto state = try_read_file(server_state_file_name);
if (state)
{
if (std::filesystem::exists(old_path))
std::filesystem::remove(old_path);
disk->removeFileIfExists(old_path);
return state;
}
std::filesystem::remove(server_state_path);
disk->removeFile(server_state_file_name);
}
if (std::filesystem::exists(old_path))
if (disk->exists(old_path))
{
if (disk->exists(copy_lock_file))
{
disk->removeFile(old_path);
disk->removeFile(copy_lock_file);
}
else
{
auto state = try_read_file(old_path);
if (state)
{
std::filesystem::rename(old_path, server_state_path);
disk->moveFile(old_path, server_state_file_name);
return state;
}
std::filesystem::remove(old_path);
disk->removeFile(old_path);
}
}
else if (disk->exists(copy_lock_file))
{
disk->removeFile(copy_lock_file);
}
LOG_WARNING(logger, "No state was read");
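
Editor's note, not part of this commit: the copy-lock handling above is what makes the save/read pair crash-safe; the ".old" copy is only trusted when no copy-lock marker was left behind. A simplified sketch of just that selection order, using std::filesystem in place of the disk interface (pickStateFile is a hypothetical helper; the real code also verifies the checksum before trusting either file):

#include <filesystem>
#include <optional>

namespace fs = std::filesystem;

// state_path ~ server_state_file_name, old_path ~ the ".old" copy,
// lock_path  ~ the copy-lock marker written before the copy starts.
static std::optional<fs::path> pickStateFile(const fs::path & state_path, const fs::path & old_path, const fs::path & lock_path)
{
    if (fs::exists(state_path))
        return state_path;                 // prefer the current file (the real code then drops the stale ".old" copy)
    if (fs::exists(old_path))
    {
        if (fs::exists(lock_path))
        {
            fs::remove(old_path);          // the copy never finished: discard both
            fs::remove(lock_path);
            return std::nullopt;
        }
        return old_path;                   // completed copy; the real code renames it back to the main name
    }
    if (fs::exists(lock_path))
        fs::remove(lock_path);             // orphaned lock with no copy
    return std::nullopt;
}
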

View File

@ -39,18 +39,17 @@ public:
KeeperStateManager(
int server_id_,
const std::string & config_prefix_,
const std::string & log_storage_path,
const std::string & state_file_path,
const std::string & server_state_file_name_,
const Poco::Util::AbstractConfiguration & config,
const CoordinationSettingsPtr & coordination_settings);
const CoordinationSettingsPtr & coordination_settings,
KeeperContextPtr keeper_context_);
/// Constructor for tests
KeeperStateManager(
int server_id_,
const std::string & host,
int port,
const std::string & logs_path,
const std::string & state_file_path);
KeeperContextPtr keeper_context_);
void loadLogStore(uint64_t last_commited_index, uint64_t logs_to_keep);
@ -111,7 +110,9 @@ public:
ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config) const;
private:
const std::filesystem::path & getOldServerStatePath();
const String & getOldServerStatePath();
DiskPtr getStateFileDisk() const;
/// Wrapper struct for Keeper cluster config. We parse this
/// info from XML files.
@ -136,7 +137,9 @@ private:
nuraft::ptr<KeeperLogStore> log_store;
const std::filesystem::path server_state_path;
const String server_state_file_name;
KeeperContextPtr keeper_context;
Poco::Logger * logger;

View File

@ -128,7 +128,7 @@ KeeperStorage::ResponsesForSessions processWatchesImpl(
watches.erase(watch_it);
}
auto parent_path = parentPath(path);
auto parent_path = parentNodePath(path);
Strings paths_to_check_for_list_watches;
if (event_type == Coordination::Event::CREATED)
@ -276,23 +276,23 @@ void KeeperStorage::initializeSystemNodes()
[](auto & node)
{
++node.stat.numChildren;
node.addChild(getBaseName(keeper_system_path));
node.addChild(getBaseNodeName(keeper_system_path));
}
);
addDigest(updated_root_it->value, "/");
}
// insert child system nodes
for (const auto & [path, data] : keeper_context->system_nodes_with_data)
for (const auto & [path, data] : keeper_context->getSystemNodesWithData())
{
assert(path.starts_with(keeper_system_path));
Node child_system_node;
child_system_node.setData(data);
auto [map_key, _] = container.insert(std::string{path}, child_system_node);
/// Take child path from key owned by map.
auto child_path = getBaseName(map_key->getKey());
auto child_path = getBaseNodeName(map_key->getKey());
container.updateValue(
parentPath(StringRef(path)),
parentNodePath(StringRef(path)),
[child_path](auto & parent)
{
// don't update stats so digest is okay
@ -728,7 +728,7 @@ bool KeeperStorage::createNode(
bool is_sequental,
Coordination::ACLs node_acls)
{
auto parent_path = parentPath(path);
auto parent_path = parentNodePath(path);
auto node_it = container.find(parent_path);
if (node_it == container.end())
@ -751,7 +751,7 @@ bool KeeperStorage::createNode(
created_node.is_sequental = is_sequental;
auto [map_key, _] = container.insert(path, created_node);
/// Take child path from key owned by map.
auto child_path = getBaseName(map_key->getKey());
auto child_path = getBaseNodeName(map_key->getKey());
container.updateValue(
parent_path,
[child_path](KeeperStorage::Node & parent)
@ -781,8 +781,8 @@ bool KeeperStorage::removeNode(const std::string & path, int32_t version)
acl_map.removeUsage(prev_node.acl_id);
container.updateValue(
parentPath(path),
[child_basename = getBaseName(node_it->key)](KeeperStorage::Node & parent)
parentNodePath(path),
[child_basename = getBaseNodeName(node_it->key)](KeeperStorage::Node & parent)
{
parent.removeChild(child_basename);
chassert(parent.stat.numChildren == static_cast<int32_t>(parent.getChildren().size()));
@ -866,7 +866,7 @@ Coordination::ACLs getNodeACLs(KeeperStorage & storage, StringRef path, bool is_
void handleSystemNodeModification(const KeeperContext & keeper_context, std::string_view error_msg)
{
if (keeper_context.server_state == KeeperContext::Phase::INIT && !keeper_context.ignore_system_path_on_startup)
if (keeper_context.getServerState() == KeeperContext::Phase::INIT && !keeper_context.ignoreSystemPathOnStartup())
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"{}. Ignoring it can lead to data loss. "
@ -929,7 +929,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr
bool checkAuth(KeeperStorage & storage, int64_t session_id, bool is_local) const override
{
auto path = zk_request->getPath();
return storage.checkACL(parentPath(path), Coordination::ACL::Create, session_id, is_local);
return storage.checkACL(parentNodePath(path), Coordination::ACL::Create, session_id, is_local);
}
std::vector<KeeperStorage::Delta>
@ -940,7 +940,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr
std::vector<KeeperStorage::Delta> new_deltas;
auto parent_path = parentPath(request.path);
auto parent_path = parentNodePath(request.path);
auto parent_node = storage.uncommitted_state.getNode(parent_path);
if (parent_node == nullptr)
return {KeeperStorage::Delta{zxid, Coordination::Error::ZNONODE}};
@ -971,7 +971,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr
if (storage.uncommitted_state.getNode(path_created))
return {KeeperStorage::Delta{zxid, Coordination::Error::ZNODEEXISTS}};
if (getBaseName(path_created).size == 0)
if (getBaseNodeName(path_created).size == 0)
return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADARGUMENTS}};
Coordination::ACLs node_acls;
@ -1121,7 +1121,7 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
{
bool checkAuth(KeeperStorage & storage, int64_t session_id, bool is_local) const override
{
return storage.checkACL(parentPath(zk_request->getPath()), Coordination::ACL::Delete, session_id, is_local);
return storage.checkACL(parentNodePath(zk_request->getPath()), Coordination::ACL::Delete, session_id, is_local);
}
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
@ -1143,7 +1143,7 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
const auto update_parent_pzxid = [&]()
{
auto parent_path = parentPath(request.path);
auto parent_path = parentNodePath(request.path);
if (!storage.uncommitted_state.getNode(parent_path))
return;
@ -1178,7 +1178,7 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
update_parent_pzxid();
new_deltas.emplace_back(
std::string{parentPath(request.path)},
std::string{parentNodePath(request.path)},
zxid,
KeeperStorage::UpdateNodeDelta{[](KeeperStorage::Node & parent)
{
@ -1321,7 +1321,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce
request.version});
new_deltas.emplace_back(
parentPath(request.path).toString(),
parentNodePath(request.path).toString(),
zxid,
KeeperStorage::UpdateNodeDelta
{
@ -1481,7 +1481,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro
bool checkAuth(KeeperStorage & storage, int64_t session_id, bool is_local) const override
{
auto path = zk_request->getPath();
return storage.checkACL(check_not_exists ? parentPath(path) : path, Coordination::ACL::Read, session_id, is_local);
return storage.checkACL(check_not_exists ? parentNodePath(path) : path, Coordination::ACL::Read, session_id, is_local);
}
std::vector<KeeperStorage::Delta>
@ -2024,7 +2024,7 @@ KeeperStorageRequestProcessorsFactory::KeeperStorageRequestProcessorsFactory()
UInt64 KeeperStorage::calculateNodesDigest(UInt64 current_digest, const std::vector<Delta> & new_deltas) const
{
if (!keeper_context->digest_enabled)
if (!keeper_context->digestEnabled())
return current_digest;
std::unordered_map<std::string_view, std::shared_ptr<Node>> updated_nodes;
@ -2122,7 +2122,7 @@ void KeeperStorage::preprocessRequest(
TransactionInfo transaction{.zxid = new_last_zxid};
uint64_t new_digest = getNodesDigest(false).value;
SCOPE_EXIT({
if (keeper_context->digest_enabled)
if (keeper_context->digestEnabled())
// if the version of digest we got from the leader is the same as the one this instances has, we can simply copy the value
// and just check the digest on the commit
// a mistake can happen while applying the changes to the uncommitted_state so for now let's just recalculate the digest here also
@ -2145,7 +2145,7 @@ void KeeperStorage::preprocessRequest(
{
new_deltas.emplace_back
(
parentPath(ephemeral_path).toString(),
parentNodePath(ephemeral_path).toString(),
new_last_zxid,
UpdateNodeDelta
{
@ -2338,7 +2338,7 @@ void KeeperStorage::rollbackRequest(int64_t rollback_zxid, bool allow_missing)
KeeperStorage::Digest KeeperStorage::getNodesDigest(bool committed) const
{
if (!keeper_context->digest_enabled)
if (!keeper_context->digestEnabled())
return {.version = DigestVersion::NO_DIGEST};
if (committed || uncommitted_transactions.empty())
@ -2349,13 +2349,13 @@ KeeperStorage::Digest KeeperStorage::getNodesDigest(bool committed) const
void KeeperStorage::removeDigest(const Node & node, const std::string_view path)
{
if (keeper_context->digest_enabled)
if (keeper_context->digestEnabled())
nodes_digest -= node.getDigest(path);
}
void KeeperStorage::addDigest(const Node & node, const std::string_view path)
{
if (keeper_context->digest_enabled)
if (keeper_context->digestEnabled())
{
node.invalidateDigestCache();
nodes_digest += node.getDigest(path);

View File

@ -0,0 +1,321 @@
#include <Interpreters/Context.h>
#include <Common/Config/ConfigProcessor.h>
#include <Common/Macros.h>
#include <Common/ThreadPool.h>
#include <Core/ServerSettings.h>
#include <boost/noncopyable.hpp>
#include <memory>
#include <cassert>
namespace ProfileEvents
{
extern const Event ContextLock;
}
namespace CurrentMetrics
{
extern const Metric ContextLockWait;
extern const Metric BackgroundSchedulePoolTask;
extern const Metric BackgroundSchedulePoolSize;
extern const Metric IOWriterThreads;
extern const Metric IOWriterThreadsActive;
}
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
struct ContextSharedPart : boost::noncopyable
{
ContextSharedPart()
: macros(std::make_unique<Macros>())
{}
/// For access of most of shared objects. Recursive mutex.
mutable std::recursive_mutex mutex;
mutable std::mutex keeper_dispatcher_mutex;
mutable std::shared_ptr<KeeperDispatcher> keeper_dispatcher TSA_GUARDED_BY(keeper_dispatcher_mutex);
ServerSettings server_settings;
String path; /// Path to the data directory, with a slash at the end.
ConfigurationPtr config; /// Global configuration settings.
MultiVersion<Macros> macros; /// Substitutions extracted from config.
mutable std::unique_ptr<BackgroundSchedulePool> schedule_pool; /// A thread pool that can run different jobs in background
RemoteHostFilter remote_host_filter; /// Allowed URL from config.xml
///
mutable std::unique_ptr<IAsynchronousReader> asynchronous_remote_fs_reader;
mutable std::unique_ptr<IAsynchronousReader> asynchronous_local_fs_reader;
mutable std::unique_ptr<IAsynchronousReader> synchronous_local_fs_reader;
mutable std::unique_ptr<ThreadPool> threadpool_writer;
mutable ThrottlerPtr remote_read_throttler; /// A server-wide throttler for remote IO reads
mutable ThrottlerPtr remote_write_throttler; /// A server-wide throttler for remote IO writes
mutable ThrottlerPtr local_read_throttler; /// A server-wide throttler for local IO reads
mutable ThrottlerPtr local_write_throttler; /// A server-wide throttler for local IO writes
};
Context::Context() = default;
Context::~Context() = default;
Context::Context(const Context &) = default;
Context & Context::operator=(const Context &) = default;
SharedContextHolder::SharedContextHolder(SharedContextHolder &&) noexcept = default;
SharedContextHolder & SharedContextHolder::operator=(SharedContextHolder &&) noexcept = default;
SharedContextHolder::SharedContextHolder() = default;
SharedContextHolder::~SharedContextHolder() = default;
SharedContextHolder::SharedContextHolder(std::unique_ptr<ContextSharedPart> shared_context)
: shared(std::move(shared_context)) {}
void SharedContextHolder::reset() { shared.reset(); }
void Context::makeGlobalContext()
{
initGlobal();
global_context = shared_from_this();
}
ContextMutablePtr Context::createGlobal(ContextSharedPart * shared)
{
auto res = std::shared_ptr<Context>(new Context);
res->shared = shared;
return res;
}
void Context::initGlobal()
{
assert(!global_context_instance);
global_context_instance = shared_from_this();
}
SharedContextHolder Context::createShared()
{
return SharedContextHolder(std::make_unique<ContextSharedPart>());
}
ContextMutablePtr Context::getGlobalContext() const
{
auto ptr = global_context.lock();
if (!ptr) throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no global context or global context has expired");
return ptr;
}
std::unique_lock<std::recursive_mutex> Context::getLock() const
{
ProfileEvents::increment(ProfileEvents::ContextLock);
CurrentMetrics::Increment increment{CurrentMetrics::ContextLockWait};
return std::unique_lock(shared->mutex);
}
String Context::getPath() const
{
auto lock = getLock();
return shared->path;
}
void Context::setPath(const String & path)
{
auto lock = getLock();
shared->path = path;
}
MultiVersion<Macros>::Version Context::getMacros() const
{
return shared->macros.get();
}
void Context::setMacros(std::unique_ptr<Macros> && macros)
{
shared->macros.set(std::move(macros));
}
BackgroundSchedulePool & Context::getSchedulePool() const
{
auto lock = getLock();
if (!shared->schedule_pool)
{
shared->schedule_pool = std::make_unique<BackgroundSchedulePool>(
shared->server_settings.background_schedule_pool_size,
CurrentMetrics::BackgroundSchedulePoolTask,
CurrentMetrics::BackgroundSchedulePoolSize,
"BgSchPool");
}
return *shared->schedule_pool;
}
void Context::setRemoteHostFilter(const Poco::Util::AbstractConfiguration & config)
{
shared->remote_host_filter.setValuesFromConfig(config);
}
const RemoteHostFilter & Context::getRemoteHostFilter() const
{
return shared->remote_host_filter;
}
IAsynchronousReader & Context::getThreadPoolReader(FilesystemReaderType type) const
{
auto lock = getLock();
switch (type)
{
case FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER:
{
if (!shared->asynchronous_remote_fs_reader)
shared->asynchronous_remote_fs_reader = createThreadPoolReader(type, getConfigRef());
return *shared->asynchronous_remote_fs_reader;
}
case FilesystemReaderType::ASYNCHRONOUS_LOCAL_FS_READER:
{
if (!shared->asynchronous_local_fs_reader)
shared->asynchronous_local_fs_reader = createThreadPoolReader(type, getConfigRef());
return *shared->asynchronous_local_fs_reader;
}
case FilesystemReaderType::SYNCHRONOUS_LOCAL_FS_READER:
{
if (!shared->synchronous_local_fs_reader)
shared->synchronous_local_fs_reader = createThreadPoolReader(type, getConfigRef());
return *shared->synchronous_local_fs_reader;
}
}
}
std::shared_ptr<FilesystemCacheLog> Context::getFilesystemCacheLog() const
{
return nullptr;
}
std::shared_ptr<FilesystemReadPrefetchesLog> Context::getFilesystemReadPrefetchesLog() const
{
return nullptr;
}
void Context::setConfig(const ConfigurationPtr & config)
{
auto lock = getLock();
shared->config = config;
}
const Poco::Util::AbstractConfiguration & Context::getConfigRef() const
{
auto lock = getLock();
return shared->config ? *shared->config : Poco::Util::Application::instance().config();
}
std::shared_ptr<AsyncReadCounters> Context::getAsyncReadCounters() const
{
auto lock = getLock();
if (!async_read_counters)
async_read_counters = std::make_shared<AsyncReadCounters>();
return async_read_counters;
}
ThreadPool & Context::getThreadPoolWriter() const
{
const auto & config = getConfigRef();
auto lock = getLock();
if (!shared->threadpool_writer)
{
auto pool_size = config.getUInt(".threadpool_writer_pool_size", 100);
auto queue_size = config.getUInt(".threadpool_writer_queue_size", 1000000);
shared->threadpool_writer = std::make_unique<ThreadPool>(
CurrentMetrics::IOWriterThreads, CurrentMetrics::IOWriterThreadsActive, pool_size, pool_size, queue_size);
}
return *shared->threadpool_writer;
}
ThrottlerPtr Context::getRemoteReadThrottler() const
{
return nullptr;
}
ThrottlerPtr Context::getRemoteWriteThrottler() const
{
return nullptr;
}
ThrottlerPtr Context::getLocalReadThrottler() const
{
return nullptr;
}
ThrottlerPtr Context::getLocalWriteThrottler() const
{
return nullptr;
}
ReadSettings Context::getReadSettings() const
{
return ReadSettings{};
}
void Context::initializeKeeperDispatcher([[maybe_unused]] bool start_async) const
{
const auto & config_ref = getConfigRef();
std::lock_guard lock(shared->keeper_dispatcher_mutex);
if (shared->keeper_dispatcher)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to initialize Keeper multiple times");
if (config_ref.has("keeper_server"))
{
shared->keeper_dispatcher = std::make_shared<KeeperDispatcher>();
shared->keeper_dispatcher->initialize(config_ref, true, start_async, getMacros());
}
}
std::shared_ptr<KeeperDispatcher> Context::getKeeperDispatcher() const
{
std::lock_guard lock(shared->keeper_dispatcher_mutex);
if (!shared->keeper_dispatcher)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Keeper must be initialized before requests");
return shared->keeper_dispatcher;
}
std::shared_ptr<KeeperDispatcher> Context::tryGetKeeperDispatcher() const
{
std::lock_guard lock(shared->keeper_dispatcher_mutex);
return shared->keeper_dispatcher;
}
void Context::shutdownKeeperDispatcher() const
{
std::lock_guard lock(shared->keeper_dispatcher_mutex);
if (shared->keeper_dispatcher)
{
shared->keeper_dispatcher->shutdown();
shared->keeper_dispatcher.reset();
}
}
void Context::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::AbstractConfiguration & config_)
{
std::lock_guard lock(shared->keeper_dispatcher_mutex);
if (!shared->keeper_dispatcher)
return;
shared->keeper_dispatcher->updateConfiguration(getConfigRef(), getMacros());
}
}

View File

@ -0,0 +1,120 @@
#pragma once
#include <Interpreters/Context_fwd.h>
#include <Coordination/KeeperDispatcher.h>
#include <Common/MultiVersion.h>
#include <Common/RemoteHostFilter.h>
#include <Disks/IO/getThreadPoolReader.h>
#include <Core/Settings.h>
#include <Core/BackgroundSchedulePool.h>
#include <IO/AsyncReadCounters.h>
#include <Poco/Util/Application.h>
#include <memory>
namespace DB
{
struct ContextSharedPart;
class Macros;
class FilesystemCacheLog;
class FilesystemReadPrefetchesLog;
/// A small class which owns ContextShared.
/// We don't use something like unique_ptr directly to allow ContextShared type to be incomplete.
struct SharedContextHolder
{
~SharedContextHolder();
SharedContextHolder();
explicit SharedContextHolder(std::unique_ptr<ContextSharedPart> shared_context);
SharedContextHolder(SharedContextHolder &&) noexcept;
SharedContextHolder & operator=(SharedContextHolder &&) noexcept;
ContextSharedPart * get() const { return shared.get(); }
void reset();
private:
std::unique_ptr<ContextSharedPart> shared;
};
class Context : public std::enable_shared_from_this<Context>
{
private:
/// Use copy constructor or createGlobal() instead
Context();
Context(const Context &);
Context & operator=(const Context &);
std::unique_lock<std::recursive_mutex> getLock() const;
ContextWeakMutablePtr global_context;
inline static ContextPtr global_context_instance;
ContextSharedPart * shared;
/// Query metrics for reading data asynchronously with IAsynchronousReader.
mutable std::shared_ptr<AsyncReadCounters> async_read_counters;
Settings settings; /// Setting for query execution.
public:
/// Create initial Context with ContextShared and etc.
static ContextMutablePtr createGlobal(ContextSharedPart * shared);
static SharedContextHolder createShared();
ContextMutablePtr getGlobalContext() const;
static ContextPtr getGlobalContextInstance() { return global_context_instance; }
void makeGlobalContext();
void initGlobal();
~Context();
using ConfigurationPtr = Poco::AutoPtr<Poco::Util::AbstractConfiguration>;
/// Global application configuration settings.
void setConfig(const ConfigurationPtr & config);
const Poco::Util::AbstractConfiguration & getConfigRef() const;
const Settings & getSettingsRef() const { return settings; }
String getPath() const;
void setPath(const String & path);
MultiVersion<Macros>::Version getMacros() const;
void setMacros(std::unique_ptr<Macros> && macros);
BackgroundSchedulePool & getSchedulePool() const;
/// Storage of allowed hosts from config.xml
void setRemoteHostFilter(const Poco::Util::AbstractConfiguration & config);
const RemoteHostFilter & getRemoteHostFilter() const;
std::shared_ptr<FilesystemCacheLog> getFilesystemCacheLog() const;
std::shared_ptr<FilesystemReadPrefetchesLog> getFilesystemReadPrefetchesLog() const;
IAsynchronousReader & getThreadPoolReader(FilesystemReaderType type) const;
std::shared_ptr<AsyncReadCounters> getAsyncReadCounters() const;
ThreadPool & getThreadPoolWriter() const;
ThrottlerPtr getRemoteReadThrottler() const;
ThrottlerPtr getRemoteWriteThrottler() const;
ThrottlerPtr getLocalReadThrottler() const;
ThrottlerPtr getLocalWriteThrottler() const;
ReadSettings getReadSettings() const;
std::shared_ptr<KeeperDispatcher> getKeeperDispatcher() const;
std::shared_ptr<KeeperDispatcher> tryGetKeeperDispatcher() const;
void initializeKeeperDispatcher(bool start_async) const;
void shutdownKeeperDispatcher() const;
void updateKeeperConfiguration(const Poco::Util::AbstractConfiguration & config);
};
}
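
Editor's note, not part of this commit: the trimmed-down Context above gives the standalone Keeper binary the same bootstrap shape the server uses. A hedged sketch of the expected wiring, based only on the header shown here (the include path, the helper name and loaded_config are assumptions; the actual keeper entry point is not in this diff):

#include <Coordination/Standalone/Context.h>   // assumed location of the header above

void bootstrapKeeperContext(const DB::Context::ConfigurationPtr & loaded_config)
{
    auto shared_context = DB::Context::createShared();
    auto global_context = DB::Context::createGlobal(shared_context.get());
    global_context->makeGlobalContext();                        // also publishes getGlobalContextInstance()
    global_context->setConfig(loaded_config);
    global_context->initializeKeeperDispatcher(/* start_async = */ false);
}
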

View File

@ -0,0 +1,24 @@
#include <Core/Settings.h>
namespace DB
{
IMPLEMENT_SETTINGS_TRAITS(SettingsTraits, LIST_OF_SETTINGS)
std::vector<String> Settings::getAllRegisteredNames() const
{
std::vector<String> all_settings;
for (const auto & setting_field : all())
{
all_settings.push_back(setting_field.getName());
}
return all_settings;
}
void Settings::set(std::string_view name, const Field & value)
{
BaseSettings::set(name, value);
}
}

View File

@ -0,0 +1,14 @@
#include <Common/CurrentThread.h>
namespace DB
{
void CurrentThread::detachFromGroupIfNotDetached()
{
}
void CurrentThread::attachToGroup(const ThreadGroupPtr &)
{
}
}

View File

@ -1,87 +0,0 @@
#include <Coordination/TinyContext.h>
#include <Common/Exception.h>
#include <Coordination/KeeperDispatcher.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
void TinyContext::setConfig(const ConfigurationPtr & config_)
{
std::lock_guard lock(keeper_dispatcher_mutex);
config = config_;
}
const Poco::Util::AbstractConfiguration & TinyContext::getConfigRef() const
{
std::lock_guard lock(keeper_dispatcher_mutex);
return config ? *config : Poco::Util::Application::instance().config();
}
void TinyContext::initializeKeeperDispatcher([[maybe_unused]] bool start_async) const
{
const auto & config_ref = getConfigRef();
std::lock_guard lock(keeper_dispatcher_mutex);
if (keeper_dispatcher)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to initialize Keeper multiple times");
if (config_ref.has("keeper_server"))
{
keeper_dispatcher = std::make_shared<KeeperDispatcher>();
MultiVersion<Macros>::Version macros;
if (config_ref.has("macros"))
macros = std::make_unique<Macros>(config_ref, "macros", &Poco::Logger::get("TinyContext"));
keeper_dispatcher->initialize(config_ref, true, start_async, macros);
}
}
std::shared_ptr<KeeperDispatcher> TinyContext::getKeeperDispatcher() const
{
std::lock_guard lock(keeper_dispatcher_mutex);
if (!keeper_dispatcher)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Keeper must be initialized before requests");
return keeper_dispatcher;
}
std::shared_ptr<KeeperDispatcher> TinyContext::tryGetKeeperDispatcher() const
{
std::lock_guard lock(keeper_dispatcher_mutex);
return keeper_dispatcher;
}
void TinyContext::shutdownKeeperDispatcher() const
{
std::lock_guard lock(keeper_dispatcher_mutex);
if (keeper_dispatcher)
{
keeper_dispatcher->shutdown();
keeper_dispatcher.reset();
}
}
void TinyContext::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::AbstractConfiguration & config_)
{
std::lock_guard lock(keeper_dispatcher_mutex);
if (!keeper_dispatcher)
return;
MultiVersion<Macros>::Version macros;
if (config_.has("macros"))
macros = std::make_unique<Macros>(config_, "macros", &Poco::Logger::get("TinyContext"));
keeper_dispatcher->updateConfiguration(config_, macros);
}
}

View File

@ -1,36 +0,0 @@
#pragma once
#include <memory>
#include <mutex>
#include <Poco/Util/Application.h>
#include <base/defines.h>
namespace DB
{
class KeeperDispatcher;
class TinyContext : public std::enable_shared_from_this<TinyContext>
{
public:
std::shared_ptr<KeeperDispatcher> getKeeperDispatcher() const;
std::shared_ptr<KeeperDispatcher> tryGetKeeperDispatcher() const;
void initializeKeeperDispatcher(bool start_async) const;
void shutdownKeeperDispatcher() const;
void updateKeeperConfiguration(const Poco::Util::AbstractConfiguration & config);
using ConfigurationPtr = Poco::AutoPtr<Poco::Util::AbstractConfiguration>;
void setConfig(const ConfigurationPtr & config);
const Poco::Util::AbstractConfiguration & getConfigRef() const;
private:
mutable std::mutex keeper_dispatcher_mutex;
mutable std::shared_ptr<KeeperDispatcher> keeper_dispatcher TSA_GUARDED_BY(keeper_dispatcher_mutex);
ConfigurationPtr config TSA_GUARDED_BY(keeper_dispatcher_mutex);
};
using TinyContextPtr = std::shared_ptr<TinyContext>;
}

View File

@ -139,8 +139,8 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::L
{
if (itr.key != "/")
{
auto parent_path = parentPath(itr.key);
storage.container.updateValue(parent_path, [my_path = itr.key] (KeeperStorage::Node & value) { value.addChild(getBaseName(my_path)); ++value.stat.numChildren; });
auto parent_path = parentNodePath(itr.key);
storage.container.updateValue(parent_path, [my_path = itr.key] (KeeperStorage::Node & value) { value.addChild(getBaseNodeName(my_path)); ++value.stat.numChildren; });
}
}

View File

@ -21,7 +21,7 @@ static size_t findLastSlash(StringRef path)
return std::string::npos;
}
StringRef parentPath(StringRef path)
StringRef parentNodePath(StringRef path)
{
auto rslash_pos = findLastSlash(path);
if (rslash_pos > 0)
@ -29,7 +29,7 @@ StringRef parentPath(StringRef path)
return "/";
}
StringRef getBaseName(StringRef path)
StringRef getBaseNodeName(StringRef path)
{
size_t basename_start = findLastSlash(path);
return StringRef{path.data + basename_start + 1, path.size - basename_start - 1};

View File

@ -6,8 +6,8 @@
namespace DB
{
StringRef parentPath(StringRef path);
StringRef parentNodePath(StringRef path);
StringRef getBaseName(StringRef path);
StringRef getBaseNodeName(StringRef path);
}
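
Editor's note, not part of this commit: for readers tracking the rename, a quick sketch of what the two helpers return, written against std::string_view rather than StringRef (parentNodePathView/getBaseNodeNameView are stand-ins for illustration only):

#include <cassert>
#include <string_view>

static std::string_view parentNodePathView(std::string_view path)
{
    auto pos = path.rfind('/');
    return (pos == std::string_view::npos || pos == 0) ? std::string_view{"/"} : path.substr(0, pos);
}

static std::string_view getBaseNodeNameView(std::string_view path)
{
    return path.substr(path.rfind('/') + 1);   // npos + 1 wraps to 0, i.e. the whole string when there is no slash
}

int main()
{
    assert(parentNodePathView("/keeper/api_version") == "/keeper");
    assert(getBaseNodeNameView("/keeper/api_version") == "api_version");
    assert(parentNodePathView("/keeper") == "/");
}
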

File diff suppressed because it is too large.

View File

@ -378,6 +378,40 @@ void SettingFieldMap::readBinary(ReadBuffer & in)
*this = map;
}
#else
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
SettingFieldMap::SettingFieldMap(const Field &) : value(Map()) {}
String SettingFieldMap::toString() const
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Setting of type Map not supported");
}
SettingFieldMap & SettingFieldMap::operator =(const Field &)
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Setting of type Map not supported");
}
void SettingFieldMap::parseFromString(const String &)
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Setting of type Map not supported");
}
void SettingFieldMap::writeBinary(WriteBuffer &) const
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Setting of type Map not supported");
}
void SettingFieldMap::readBinary(ReadBuffer &)
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Setting of type Map not supported");
}
#endif
namespace

View File

@ -245,6 +245,12 @@ struct SettingFieldString
void readBinary(ReadBuffer & in);
};
#ifdef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#define NORETURN [[noreturn]]
#else
#define NORETURN
#endif
struct SettingFieldMap
{
public:
@ -261,13 +267,15 @@ public:
operator const Map &() const { return value; } /// NOLINT
explicit operator Field() const { return value; }
String toString() const;
void parseFromString(const String & str);
NORETURN String toString() const;
NORETURN void parseFromString(const String & str);
void writeBinary(WriteBuffer & out) const;
void readBinary(ReadBuffer & in);
NORETURN void writeBinary(WriteBuffer & out) const;
NORETURN void readBinary(ReadBuffer & in);
};
#undef NORETURN
struct SettingFieldChar
{
public:

View File

@ -1,10 +1,11 @@
#include <base/defines.h>
#include <Core/SettingsQuirks.h>
#include <Core/Settings.h>
#include <Poco/Environment.h>
#include <Poco/Platform.h>
#include <Common/VersionNumber.h>
#include <Common/logger_useful.h>
#include <cstdlib>
namespace
{
@ -71,6 +72,12 @@ void applySettingsQuirks(Settings & settings, Poco::Logger * log)
}
}
#if defined(THREAD_SANITIZER)
settings.use_hedged_requests.value = false;
if (log)
LOG_WARNING(log, "use_hedged_requests has been disabled for the build with Thread Sanitizer, because they are using fibers, leading to a failed assertion inside TSan");
#endif
if (!queryProfilerWorks())
{
if (settings.query_profiler_real_time_period_ns)

View File

@ -135,6 +135,7 @@ QueryPipeline HTTPDictionarySource::loadIds(const std::vector<UInt64> & ids)
WriteBufferFromOStream out_buffer(ostr);
auto output_format = context->getOutputFormatParallelIfPossible(configuration.format, out_buffer, block.cloneEmpty());
formatBlock(output_format, block);
out_buffer.finalize();
};
Poco::URI uri(configuration.url);
@ -164,6 +165,7 @@ QueryPipeline HTTPDictionarySource::loadKeys(const Columns & key_columns, const
WriteBufferFromOStream out_buffer(ostr);
auto output_format = context->getOutputFormatParallelIfPossible(configuration.format, out_buffer, block.cloneEmpty());
formatBlock(output_format, block);
out_buffer.finalize();
};
Poco::URI uri(configuration.url);

View File

@ -56,6 +56,7 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c
size_t current_read_until_position = read_until_position ? read_until_position : object.bytes_size;
auto current_read_buffer_creator = [=, this]() { return read_buffer_creator(object_path, current_read_until_position); };
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
if (with_cache)
{
auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path);
@ -72,6 +73,7 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c
read_until_position ? std::optional<size_t>(read_until_position) : std::nullopt,
cache_log);
}
#endif
return current_read_buffer_creator();
}

View File

@ -7,9 +7,7 @@
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
#include <Disks/IO/ThreadPoolReader.h>
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#include <Interpreters/Context.h>
#endif
namespace DB
{
@ -21,32 +19,10 @@ namespace ErrorCodes
IAsynchronousReader & getThreadPoolReader(FilesystemReaderType type)
{
#ifdef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
const auto & config = Poco::Util::Application::instance().config();
switch (type)
{
case FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER:
{
static auto asynchronous_remote_fs_reader = createThreadPoolReader(type, config);
return *asynchronous_remote_fs_reader;
}
case FilesystemReaderType::ASYNCHRONOUS_LOCAL_FS_READER:
{
static auto asynchronous_local_fs_reader = createThreadPoolReader(type, config);
return *asynchronous_local_fs_reader;
}
case FilesystemReaderType::SYNCHRONOUS_LOCAL_FS_READER:
{
static auto synchronous_local_fs_reader = createThreadPoolReader(type, config);
return *synchronous_local_fs_reader;
}
}
#else
auto context = Context::getGlobalContextInstance();
if (!context)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context not initialized");
return context->getThreadPoolReader(type);
#endif
}
std::unique_ptr<IAsynchronousReader> createThreadPoolReader(

View File

@ -11,7 +11,6 @@
#include <Common/logger_useful.h>
#include <Common/filesystemHelpers.h>
#include <Common/CurrentMetrics.h>
#include <Disks/ObjectStorages/Cached/CachedObjectStorage.h>
#include <Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h>
#include <Disks/ObjectStorages/DiskObjectStorageTransaction.h>
#include <Disks/FakeDiskTransaction.h>
@ -530,24 +529,6 @@ DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
threadpool_size);
}
void DiskObjectStorage::wrapWithCache(FileCachePtr cache, const FileCacheSettings & cache_settings, const String & layer_name)
{
object_storage = std::make_shared<CachedObjectStorage>(object_storage, cache, cache_settings, layer_name);
}
NameSet DiskObjectStorage::getCacheLayersNames() const
{
NameSet cache_layers;
auto current_object_storage = object_storage;
while (current_object_storage->supportsCache())
{
auto * cached_object_storage = assert_cast<CachedObjectStorage *>(current_object_storage.get());
cache_layers.insert(cached_object_storage->getCacheConfigName());
current_object_storage = cached_object_storage->getWrappedObjectStorage();
}
return cache_layers;
}
std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
const String & path,
const ReadSettings & settings,

View File

@ -181,20 +181,22 @@ public:
/// MergeTree table on this disk.
bool isWriteOnce() const override;
/// Add a cache layer.
/// Example: DiskObjectStorage(S3ObjectStorage) -> DiskObjectStorage(CachedObjectStorage(S3ObjectStorage))
/// There can be any number of cache layers:
/// DiskObjectStorage(CachedObjectStorage(...CacheObjectStorage(S3ObjectStorage)...))
void wrapWithCache(FileCachePtr cache, const FileCacheSettings & cache_settings, const String & layer_name);
/// Get structure of object storage this disk works with. Examples:
/// DiskObjectStorage(S3ObjectStorage)
/// DiskObjectStorage(CachedObjectStorage(S3ObjectStorage))
/// DiskObjectStorage(CachedObjectStorage(CachedObjectStorage(S3ObjectStorage)))
String getStructure() const { return fmt::format("DiskObjectStorage-{}({})", getName(), object_storage->getName()); }
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
/// Add a cache layer.
/// Example: DiskObjectStorage(S3ObjectStorage) -> DiskObjectStorage(CachedObjectStorage(S3ObjectStorage))
/// There can be any number of cache layers:
/// DiskObjectStorage(CachedObjectStorage(...CacheObjectStorage(S3ObjectStorage)...))
void wrapWithCache(FileCachePtr cache, const FileCacheSettings & cache_settings, const String & layer_name);
/// Get names of all cache layers. Name is how cache is defined in configuration file.
NameSet getCacheLayersNames() const override;
#endif
static std::shared_ptr<Executor> getAsyncExecutor(const std::string & log_name, size_t size);

View File

@ -0,0 +1,28 @@
#include <Disks/ObjectStorages/Cached/CachedObjectStorage.h>
#include <Disks/ObjectStorages/DiskObjectStorage.h>
#include <Common/assert_cast.h>
namespace DB
{
void DiskObjectStorage::wrapWithCache(FileCachePtr cache, const FileCacheSettings & cache_settings, const String & layer_name)
{
object_storage = std::make_shared<CachedObjectStorage>(object_storage, cache, cache_settings, layer_name);
}
NameSet DiskObjectStorage::getCacheLayersNames() const
{
NameSet cache_layers;
auto current_object_storage = object_storage;
while (current_object_storage->supportsCache())
{
auto * cached_object_storage = assert_cast<CachedObjectStorage *>(current_object_storage.get());
cache_layers.insert(cached_object_storage->getCacheConfigName());
current_object_storage = cached_object_storage->getWrappedObjectStorage();
}
return cache_layers;
}
}

View File

@ -63,7 +63,7 @@ public:
uint32_t getHardlinkCount(const std::string & /* path */) const override
{
return 1;
return 0;
}
bool supportsChmod() const override { return false; }

View File

@ -32,6 +32,8 @@ void registerDiskCache(DiskFactory & factory, bool global_skip_access_check);
void registerDiskLocalObjectStorage(DiskFactory & factory, bool global_skip_access_check);
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
void registerDisks(bool global_skip_access_check)
{
auto & factory = DiskFactory::instance();
@ -61,4 +63,19 @@ void registerDisks(bool global_skip_access_check)
registerDiskLocalObjectStorage(factory, global_skip_access_check);
}
#else
void registerDisks(bool global_skip_access_check)
{
auto & factory = DiskFactory::instance();
registerDiskLocal(factory, global_skip_access_check);
#if USE_AWS_S3
registerDiskS3(factory, global_skip_access_check);
#endif
}
#endif
}

View File

@ -19,14 +19,7 @@ void WriteBufferFromOStream::nextImpl()
ostr->flush();
if (!ostr->good())
{
/// FIXME do not call finalize in dtors (and remove iostreams)
bool avoid_throwing_exceptions = std::uncaught_exceptions();
if (avoid_throwing_exceptions)
LOG_ERROR(&Poco::Logger::get("WriteBufferFromOStream"), "Cannot write to ostream at offset {}. Stack trace: {}", count(), StackTrace().toString());
else
throw Exception(ErrorCodes::CANNOT_WRITE_TO_OSTREAM, "Cannot write to ostream at offset {}", count());
}
}
WriteBufferFromOStream::WriteBufferFromOStream(
@ -46,9 +39,4 @@ WriteBufferFromOStream::WriteBufferFromOStream(
{
}
WriteBufferFromOStream::~WriteBufferFromOStream()
{
finalize();
}
}

View File

@ -18,8 +18,6 @@ public:
char * existing_memory = nullptr,
size_t alignment = 0);
~WriteBufferFromOStream() override;
protected:
explicit WriteBufferFromOStream(size_t size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, size_t alignment = 0);
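
Editor's note, not part of this commit: with the destructor no longer calling finalize() (see the WriteBufferFromOStream changes above and the new finalize() calls in HTTPDictionarySource earlier in this diff), callers own the flush explicitly. A minimal usage sketch under that contract, assuming the usual IO header location:

#include <sstream>
#include <string>
#include <IO/WriteBufferFromOStream.h>

std::string formatPayload()
{
    std::ostringstream ostr;
    {
        DB::WriteBufferFromOStream out_buffer(ostr);
        const std::string payload = "payload";
        out_buffer.write(payload.data(), payload.size());
        out_buffer.finalize();    // must be explicit now; the destructor no longer flushes for you
    }
    return ostr.str();
}
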

View File

@ -1,5 +1,6 @@
#include <IO/ZstdDeflatingAppendableWriteBuffer.h>
#include <Common/Exception.h>
#include "IO/ReadBufferFromFileBase.h"
#include <IO/ReadBufferFromFile.h>
namespace DB
@ -11,14 +12,16 @@ namespace ErrorCodes
}
ZstdDeflatingAppendableWriteBuffer::ZstdDeflatingAppendableWriteBuffer(
std::unique_ptr<WriteBufferFromFile> out_,
std::unique_ptr<WriteBufferFromFileBase> out_,
int compression_level,
bool append_to_existing_file_,
std::function<std::unique_ptr<ReadBufferFromFileBase>()> read_buffer_creator_,
size_t buf_size,
char * existing_memory,
size_t alignment)
: BufferWithOwnMemory(buf_size, existing_memory, alignment)
, out(std::move(out_))
, read_buffer_creator(std::move(read_buffer_creator_))
, append_to_existing_file(append_to_existing_file_)
{
cctx = ZSTD_createCCtx();
@ -194,13 +197,13 @@ void ZstdDeflatingAppendableWriteBuffer::addEmptyBlock()
bool ZstdDeflatingAppendableWriteBuffer::isNeedToAddEmptyBlock()
{
ReadBufferFromFile reader(out->getFileName());
auto fsize = reader.getFileSize();
auto reader = read_buffer_creator();
auto fsize = reader->getFileSize();
if (fsize > 3)
{
std::array<char, 3> result;
reader.seek(fsize - 3, SEEK_SET);
reader.readStrict(result.data(), 3);
reader->seek(fsize - 3, SEEK_SET);
reader->readStrict(result.data(), 3);
/// If we don't have correct block in the end, then we need to add it manually.
/// NOTE: maybe we can have the same bytes in case of data corruption/unfinished write.

View File

@ -5,6 +5,7 @@
#include <IO/WriteBuffer.h>
#include <IO/WriteBufferDecorator.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromFileBase.h>
#include <zstd.h>
@ -29,9 +30,10 @@ public:
static inline constexpr ZSTDLastBlock ZSTD_CORRECT_TERMINATION_LAST_BLOCK = {0x01, 0x00, 0x00};
ZstdDeflatingAppendableWriteBuffer(
std::unique_ptr<WriteBufferFromFile> out_,
std::unique_ptr<WriteBufferFromFileBase> out_,
int compression_level,
bool append_to_existing_file_,
std::function<std::unique_ptr<ReadBufferFromFileBase>()> read_buffer_creator_,
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
char * existing_memory = nullptr,
size_t alignment = 0);
@ -68,7 +70,8 @@ private:
/// Adding zstd empty block (ZSTD_CORRECT_TERMINATION_LAST_BLOCK) to out.working_buffer
void addEmptyBlock();
std::unique_ptr<WriteBufferFromFile> out;
std::unique_ptr<WriteBufferFromFileBase> out;
std::function<std::unique_ptr<ReadBufferFromFileBase>()> read_buffer_creator;
bool append_to_existing_file = false;
ZSTD_CCtx * cctx;
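
Editor's note, not part of this commit: since the buffer now takes a WriteBufferFromFileBase plus a read-buffer factory instead of a concrete WriteBufferFromFile, a caller on the disk abstraction would wire it up roughly as below. This is a hedged fragment: `disk`, `path` and the compression level are placeholders, and the real call sites (in the changelog code) are not shown in this diff.

auto compressed_log = std::make_unique<ZstdDeflatingAppendableWriteBuffer>(
    disk->writeFile(path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append),
    /* compression_level */ 3,
    /* append_to_existing_file_ */ true,
    /* read_buffer_creator_ */ [disk, path] { return disk->readFile(path); });
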

Some files were not shown because too many files have changed in this diff.