Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 09:32:06 +00:00)

Commit dcda3576d2: Merge branch 'master' into add-separate-access-for-use-named-collections
@@ -16,8 +16,9 @@ curl https://clickhouse.com/ | sh
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
 * [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
 * [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
-* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
-* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
+* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlighting and navigation.
+* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
 * [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

 ## Upcoming Events
base/base/hex.h (426 changed lines)
@@ -4,212 +4,288 @@
 #include <cstring>
 #include "types.h"

-/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
+namespace CityHash_v1_0_2 { struct uint128; }

-constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
-constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
+namespace wide
+{
+template <size_t Bits, typename Signed>
+class integer;
+}
+
+namespace impl
+{
+/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
+constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
+constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
|
||||
|
||||
/// Maps 0..255 to 00..FF or 00..ff correspondingly.
|
||||
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
|
||||
"000102030405060708090A0B0C0D0E0F"
|
||||
"101112131415161718191A1B1C1D1E1F"
|
||||
"202122232425262728292A2B2C2D2E2F"
|
||||
"303132333435363738393A3B3C3D3E3F"
|
||||
"404142434445464748494A4B4C4D4E4F"
|
||||
"505152535455565758595A5B5C5D5E5F"
|
||||
"606162636465666768696A6B6C6D6E6F"
|
||||
"707172737475767778797A7B7C7D7E7F"
|
||||
"808182838485868788898A8B8C8D8E8F"
|
||||
"909192939495969798999A9B9C9D9E9F"
|
||||
"A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
|
||||
"B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
|
||||
"C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
|
||||
"D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
|
||||
"E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
|
||||
"F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
|
||||
"000102030405060708090a0b0c0d0e0f"
|
||||
"101112131415161718191a1b1c1d1e1f"
|
||||
"202122232425262728292a2b2c2d2e2f"
|
||||
"303132333435363738393a3b3c3d3e3f"
|
||||
"404142434445464748494a4b4c4d4e4f"
|
||||
"505152535455565758595a5b5c5d5e5f"
|
||||
"606162636465666768696a6b6c6d6e6f"
|
||||
"707172737475767778797a7b7c7d7e7f"
|
||||
"808182838485868788898a8b8c8d8e8f"
|
||||
"909192939495969798999a9b9c9d9e9f"
|
||||
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
|
||||
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
|
||||
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
|
||||
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
|
||||
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
|
||||
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
|
||||
|
||||
/// Maps 0..255 to 00000000..11111111 correspondingly.
|
||||
constexpr inline std::string_view bin_byte_to_char_table = //
|
||||
"0000000000000001000000100000001100000100000001010000011000000111"
|
||||
"0000100000001001000010100000101100001100000011010000111000001111"
|
||||
"0001000000010001000100100001001100010100000101010001011000010111"
|
||||
"0001100000011001000110100001101100011100000111010001111000011111"
|
||||
"0010000000100001001000100010001100100100001001010010011000100111"
|
||||
"0010100000101001001010100010101100101100001011010010111000101111"
|
||||
"0011000000110001001100100011001100110100001101010011011000110111"
|
||||
"0011100000111001001110100011101100111100001111010011111000111111"
|
||||
"0100000001000001010000100100001101000100010001010100011001000111"
|
||||
"0100100001001001010010100100101101001100010011010100111001001111"
|
||||
"0101000001010001010100100101001101010100010101010101011001010111"
|
||||
"0101100001011001010110100101101101011100010111010101111001011111"
|
||||
"0110000001100001011000100110001101100100011001010110011001100111"
|
||||
"0110100001101001011010100110101101101100011011010110111001101111"
|
||||
"0111000001110001011100100111001101110100011101010111011001110111"
|
||||
"0111100001111001011110100111101101111100011111010111111001111111"
|
||||
"1000000010000001100000101000001110000100100001011000011010000111"
|
||||
"1000100010001001100010101000101110001100100011011000111010001111"
|
||||
"1001000010010001100100101001001110010100100101011001011010010111"
|
||||
"1001100010011001100110101001101110011100100111011001111010011111"
|
||||
"1010000010100001101000101010001110100100101001011010011010100111"
|
||||
"1010100010101001101010101010101110101100101011011010111010101111"
|
||||
"1011000010110001101100101011001110110100101101011011011010110111"
|
||||
"1011100010111001101110101011101110111100101111011011111010111111"
|
||||
"1100000011000001110000101100001111000100110001011100011011000111"
|
||||
"1100100011001001110010101100101111001100110011011100111011001111"
|
||||
"1101000011010001110100101101001111010100110101011101011011010111"
|
||||
"1101100011011001110110101101101111011100110111011101111011011111"
|
||||
"1110000011100001111000101110001111100100111001011110011011100111"
|
||||
"1110100011101001111010101110101111101100111011011110111011101111"
|
||||
"1111000011110001111100101111001111110100111101011111011011110111"
|
||||
"1111100011111001111110101111101111111100111111011111111011111111";
|
||||
|
||||
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
|
||||
constexpr inline std::string_view hex_char_to_digit_table
|
||||
= {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-Z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
256};
|
||||

/// Converts a hex digit '0'..'f' or '0'..'F' to its value 0..15.
constexpr UInt8 unhexDigit(char c)
{
    return hex_char_to_digit_table[static_cast<UInt8>(c)];
}

/// Converts an unsigned integer in the native endian to hexadecimal representation and back. Used as a base class for HexConversion<T>.
template <typename TUInt, typename = void>
struct HexConversionUInt
{
    static const constexpr size_t num_hex_digits = sizeof(TUInt) * 2;

    static void hex(TUInt uint_, char * out, std::string_view table)
    {
        union
        {
            TUInt value;
            UInt8 uint8[sizeof(TUInt)];
        };

        value = uint_;

        for (size_t i = 0; i < sizeof(TUInt); ++i)
        {
            if constexpr (std::endian::native == std::endian::little)
                memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
            else
                memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
        }
    }

    static TUInt unhex(const char * data)
    {
        TUInt res;
        if constexpr (sizeof(TUInt) == 1)
        {
            res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
        }
        else if constexpr (sizeof(TUInt) == 2)
        {
            res = static_cast<UInt16>(unhexDigit(data[0])) * 0x1000 + static_cast<UInt16>(unhexDigit(data[1])) * 0x100
                + static_cast<UInt16>(unhexDigit(data[2])) * 0x10 + static_cast<UInt16>(unhexDigit(data[3]));
        }
        else if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
        {
            res = 0;
            for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
            {
                res <<= 4;
                res += unhexDigit(*data);
            }
        }
        else
        {
            res = 0;
            for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
            {
                res <<= 64;
                res += HexConversionUInt<UInt64>::unhex(data);
            }
        }
        return res;
    }
};
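As a worked illustration of the endianness handling above (values are illustrative, not from the source):

```cpp
// Example: value = 0xABCD stored in a UInt16 on a little-endian machine.
// Seen through the union, memory is uint8[0] = 0xCD, uint8[1] = 0xAB.
// i = 0 copies the table entry for uint8[1] (0xAB) -> out[0..1] = "AB"
// i = 1 copies the table entry for uint8[0] (0xCD) -> out[2..3] = "CD"
// Result: "ABCD", most significant byte first, regardless of host byte order.
```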

/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
template <typename T, typename SFINAE = void>
struct HexConversion;

template <typename TUInt>
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};

template <size_t Bits, typename Signed>
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};

template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
{
    static const constexpr size_t num_hex_digits = 32;

    static void hex(const CityHashUInt128 & uint_, char * out, std::string_view table)
    {
        HexConversion<UInt64>::hex(uint_.high64, out, table);
        HexConversion<UInt64>::hex(uint_.low64, out + 16, table);
    }

    static CityHashUInt128 unhex(const char * data)
    {
        CityHashUInt128 res;
        res.high64 = HexConversion<UInt64>::unhex(data);
        res.low64 = HexConversion<UInt64>::unhex(data + 16);
        return res;
    }
};
}

/// Produces a hexadecimal representation of an integer value with leading zeros (for checksums).
/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
/// It can be used with signed types as well, however they are written as corresponding unsigned numbers
/// using two's complement (i.e. for example "-1" is written as "0xFF", not as "-0x01").
template <typename T>
void writeHexUIntUppercase(const T & value, char * out)
{
    impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_uppercase_table);
}

template <typename T>
void writeHexUIntLowercase(const T & value, char * out)
{
    impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_lowercase_table);
}

template <typename T>
std::string getHexUIntUppercase(const T & value)
{
    std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
    writeHexUIntUppercase(value, res.data());
    return res;
}

template <typename T>
std::string getHexUIntLowercase(const T & value)
{
    std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
    writeHexUIntLowercase(value, res.data());
    return res;
}
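A minimal usage sketch of these helpers (illustrative only; the include path is an assumption, the header lives at base/base/hex.h in the tree):

```cpp
#include <string>
#include "base/base/hex.h"  // assumed include path

void hex_demo()
{
    UInt64 value = 0xDEADBEEF;  // UInt64 comes from "types.h", pulled in by hex.h

    // 16 hex digits with leading zeros: "00000000DEADBEEF"
    std::string upper = getHexUIntUppercase(value);

    // Same digits written into a caller-provided buffer, no allocation (not NUL-terminated):
    char buf[16];
    writeHexUIntLowercase(value, buf);  // "00000000deadbeef"
}
```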

 constexpr char hexDigitUppercase(unsigned char c)
 {
-    return hex_digit_to_char_uppercase_table[c];
+    return impl::hex_digit_to_char_uppercase_table[c];
 }

 constexpr char hexDigitLowercase(unsigned char c)
 {
-    return hex_digit_to_char_lowercase_table[c];
+    return impl::hex_digit_to_char_lowercase_table[c];
 }
|
||||
|
||||
/// Maps 0..255 to 00..FF or 00..ff correspondingly
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
|
||||
"000102030405060708090A0B0C0D0E0F"
|
||||
"101112131415161718191A1B1C1D1E1F"
|
||||
"202122232425262728292A2B2C2D2E2F"
|
||||
"303132333435363738393A3B3C3D3E3F"
|
||||
"404142434445464748494A4B4C4D4E4F"
|
||||
"505152535455565758595A5B5C5D5E5F"
|
||||
"606162636465666768696A6B6C6D6E6F"
|
||||
"707172737475767778797A7B7C7D7E7F"
|
||||
"808182838485868788898A8B8C8D8E8F"
|
||||
"909192939495969798999A9B9C9D9E9F"
|
||||
"A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
|
||||
"B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
|
||||
"C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
|
||||
"D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
|
||||
"E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
|
||||
"F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
|
||||
"000102030405060708090a0b0c0d0e0f"
|
||||
"101112131415161718191a1b1c1d1e1f"
|
||||
"202122232425262728292a2b2c2d2e2f"
|
||||
"303132333435363738393a3b3c3d3e3f"
|
||||
"404142434445464748494a4b4c4d4e4f"
|
||||
"505152535455565758595a5b5c5d5e5f"
|
||||
"606162636465666768696a6b6c6d6e6f"
|
||||
"707172737475767778797a7b7c7d7e7f"
|
||||
"808182838485868788898a8b8c8d8e8f"
|
||||
"909192939495969798999a9b9c9d9e9f"
|
||||
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
|
||||
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
|
||||
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
|
||||
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
|
||||
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
|
||||
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
|
||||
|
||||
inline void writeHexByteUppercase(UInt8 byte, void * out)
|
||||
{
|
||||
memcpy(out, &hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
memcpy(out, &impl::hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
}
|
||||
|
||||
inline void writeHexByteLowercase(UInt8 byte, void * out)
|
||||
{
|
||||
memcpy(out, &hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
memcpy(out, &impl::hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
}
|
||||
|
||||
constexpr inline std::string_view bin_byte_to_char_table = //
|
||||
"0000000000000001000000100000001100000100000001010000011000000111"
|
||||
"0000100000001001000010100000101100001100000011010000111000001111"
|
||||
"0001000000010001000100100001001100010100000101010001011000010111"
|
||||
"0001100000011001000110100001101100011100000111010001111000011111"
|
||||
"0010000000100001001000100010001100100100001001010010011000100111"
|
||||
"0010100000101001001010100010101100101100001011010010111000101111"
|
||||
"0011000000110001001100100011001100110100001101010011011000110111"
|
||||
"0011100000111001001110100011101100111100001111010011111000111111"
|
||||
"0100000001000001010000100100001101000100010001010100011001000111"
|
||||
"0100100001001001010010100100101101001100010011010100111001001111"
|
||||
"0101000001010001010100100101001101010100010101010101011001010111"
|
||||
"0101100001011001010110100101101101011100010111010101111001011111"
|
||||
"0110000001100001011000100110001101100100011001010110011001100111"
|
||||
"0110100001101001011010100110101101101100011011010110111001101111"
|
||||
"0111000001110001011100100111001101110100011101010111011001110111"
|
||||
"0111100001111001011110100111101101111100011111010111111001111111"
|
||||
"1000000010000001100000101000001110000100100001011000011010000111"
|
||||
"1000100010001001100010101000101110001100100011011000111010001111"
|
||||
"1001000010010001100100101001001110010100100101011001011010010111"
|
||||
"1001100010011001100110101001101110011100100111011001111010011111"
|
||||
"1010000010100001101000101010001110100100101001011010011010100111"
|
||||
"1010100010101001101010101010101110101100101011011010111010101111"
|
||||
"1011000010110001101100101011001110110100101101011011011010110111"
|
||||
"1011100010111001101110101011101110111100101111011011111010111111"
|
||||
"1100000011000001110000101100001111000100110001011100011011000111"
|
||||
"1100100011001001110010101100101111001100110011011100111011001111"
|
||||
"1101000011010001110100101101001111010100110101011101011011010111"
|
||||
"1101100011011001110110101101101111011100110111011101111011011111"
|
||||
"1110000011100001111000101110001111100100111001011110011011100111"
|
||||
"1110100011101001111010101110101111101100111011011110111011101111"
|
||||
"1111000011110001111100101111001111110100111101011111011011110111"
|
||||
"1111100011111001111110101111101111111100111111011111111011111111";

-inline void writeBinByte(UInt8 byte, void * out)
+/// Converts a hex representation with leading zeros back to an integer value.
+/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
+template <typename T>
+constexpr T unhexUInt(const char * data)
 {
-    memcpy(out, &bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
+    return impl::HexConversion<T>::unhex(data);
 }
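A round-trip sketch for unhexUInt (illustrative; it assumes the input holds exactly sizeof(T) * 2 hex digits, leading zeros included):

```cpp
#include <cassert>
#include "base/base/hex.h"  // assumed include path

void unhex_demo()
{
    UInt32 v = unhexUInt<UInt32>("0000ABCD");  // reads exactly 8 digits for a 32-bit type
    assert(v == 0xABCD);

    // Round trip through the writer shown earlier in this header:
    char buf[8];
    writeHexUIntUppercase(v, buf);
    assert(unhexUInt<UInt32>(buf) == v);
}
```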
|
||||
|
||||
/// Produces hex representation of an unsigned int with leading zeros (for checksums)
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntImpl(TUInt uint_, char * out, std::string_view table)
|
||||
{
|
||||
union
|
||||
{
|
||||
TUInt value;
|
||||
UInt8 uint8[sizeof(TUInt)];
|
||||
};
|
||||
|
||||
value = uint_;
|
||||
|
||||
for (size_t i = 0; i < sizeof(TUInt); ++i)
|
||||
{
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
|
||||
else
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntUppercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntLowercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntUppercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntUppercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntLowercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntLowercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
|
||||
|
||||
constexpr inline std::string_view hex_char_to_digit_table
|
||||
= {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-Z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
256};

 /// Converts a hexadecimal digit '0'..'f' or '0'..'F' to UInt8.
 constexpr UInt8 unhex(char c)
 {
-    return hex_char_to_digit_table[static_cast<UInt8>(c)];
+    return impl::unhexDigit(c);
 }

 /// Converts two hexadecimal digits to UInt8.
 constexpr UInt8 unhex2(const char * data)
 {
-    return static_cast<UInt8>(unhex(data[0])) * 0x10 + static_cast<UInt8>(unhex(data[1]));
+    return unhexUInt<UInt8>(data);
 }

 /// Converts four hexadecimal digits to UInt16.
 constexpr UInt16 unhex4(const char * data)
 {
-    return static_cast<UInt16>(unhex(data[0])) * 0x1000 + static_cast<UInt16>(unhex(data[1])) * 0x100
-        + static_cast<UInt16>(unhex(data[2])) * 0x10 + static_cast<UInt16>(unhex(data[3]));
+    return unhexUInt<UInt16>(data);
 }

-template <typename TUInt>
-constexpr TUInt unhexUInt(const char * data)
+/// Produces a binary representation of a single byte.
+inline void writeBinByte(UInt8 byte, void * out)
 {
-    TUInt res = 0;
-    if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
-    {
-        for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
-        {
-            res <<= 4;
-            res += unhex(*data);
-        }
-    }
-    else
-    {
-        for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
-        {
-            res <<= 64;
-            res += unhexUInt<UInt64>(data);
-        }
-    }
-    return res;
+    memcpy(out, &impl::bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
 }
|
||||
|
@@ -87,7 +87,6 @@ set (SRCS
 src/LoggingRegistry.cpp
 src/LogStream.cpp
 src/MD5Engine.cpp
-src/MemoryPool.cpp
 src/MemoryStream.cpp
 src/Message.cpp
 src/Mutex.cpp
|
||||
|
@ -1,116 +0,0 @@
|
||||
//
|
||||
// MemoryPool.h
|
||||
//
|
||||
// Library: Foundation
|
||||
// Package: Core
|
||||
// Module: MemoryPool
|
||||
//
|
||||
// Definition of the MemoryPool class.
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#ifndef Foundation_MemoryPool_INCLUDED
|
||||
#define Foundation_MemoryPool_INCLUDED
|
||||
|
||||
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
#include "Poco/Foundation.h"
|
||||
#include "Poco/Mutex.h"
|
||||
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
|
||||
|
||||
class Foundation_API MemoryPool
|
||||
/// A simple pool for fixed-size memory blocks.
|
||||
///
|
||||
/// The main purpose of this class is to speed-up
|
||||
/// memory allocations, as well as to reduce memory
|
||||
/// fragmentation in situations where the same blocks
|
||||
/// are allocated all over again, such as in server
|
||||
/// applications.
|
||||
///
|
||||
/// All allocated blocks are retained for future use.
|
||||
/// A limit on the number of blocks can be specified.
|
||||
/// Blocks can be preallocated.
|
||||
{
|
||||
public:
|
||||
MemoryPool(std::size_t blockSize, int preAlloc = 0, int maxAlloc = 0);
|
||||
/// Creates a MemoryPool for blocks with the given blockSize.
|
||||
/// The number of blocks given in preAlloc are preallocated.
|
||||
|
||||
~MemoryPool();
|
||||
|
||||
void * get();
|
||||
/// Returns a memory block. If there are no more blocks
|
||||
/// in the pool, a new block will be allocated.
|
||||
///
|
||||
/// If maxAlloc blocks are already allocated, an
|
||||
/// OutOfMemoryException is thrown.
|
||||
|
||||
void release(void * ptr);
|
||||
/// Releases a memory block and returns it to the pool.
|
||||
|
||||
std::size_t blockSize() const;
|
||||
/// Returns the block size.
|
||||
|
||||
int allocated() const;
|
||||
/// Returns the number of allocated blocks.
|
||||
|
||||
int available() const;
|
||||
/// Returns the number of available blocks in the pool.
|
||||
|
||||
private:
|
||||
MemoryPool();
|
||||
MemoryPool(const MemoryPool &);
|
||||
MemoryPool & operator=(const MemoryPool &);
|
||||
|
||||
void clear();
|
||||
|
||||
enum
|
||||
{
|
||||
BLOCK_RESERVE = 128
|
||||
};
|
||||
|
||||
typedef std::vector<char *> BlockVec;
|
||||
|
||||
std::size_t _blockSize;
|
||||
int _maxAlloc;
|
||||
int _allocated;
|
||||
BlockVec _blocks;
|
||||
FastMutex _mutex;
|
||||
};
|
||||
|
||||
|
||||
//
|
||||
// inlines
|
||||
//
|
||||
inline std::size_t MemoryPool::blockSize() const
|
||||
{
|
||||
return _blockSize;
|
||||
}
|
||||
|
||||
|
||||
inline int MemoryPool::allocated() const
|
||||
{
|
||||
return _allocated;
|
||||
}
|
||||
|
||||
|
||||
inline int MemoryPool::available() const
|
||||
{
|
||||
return (int)_blocks.size();
|
||||
}
|
||||
|
||||
|
||||
} // namespace Poco
|
||||
|
||||
|
||||
#endif // Foundation_MemoryPool_INCLUDED
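The whole header above is deleted by this commit. For context, a sketch of how the Poco::MemoryPool interface it declared was typically used (illustrative, not taken verbatim from any caller):

```cpp
#include "Poco/MemoryPool.h"

void pool_demo()
{
    // Fixed-size blocks of 1024 bytes, 4 of them preallocated, no upper limit.
    Poco::MemoryPool pool(1024, 4);

    void * block = pool.get();   // reuses a cached block or allocates a new one
    // ... use the 1024-byte block ...
    pool.release(block);         // returns the block to the pool for future get() calls
}
```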
|
@ -1,105 +0,0 @@
|
||||
//
|
||||
// MemoryPool.cpp
|
||||
//
|
||||
// Library: Foundation
|
||||
// Package: Core
|
||||
// Module: MemoryPool
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Exception.h"
|
||||
|
||||
|
||||
namespace Poco {
|
||||
|
||||
|
||||
MemoryPool::MemoryPool(std::size_t blockSize, int preAlloc, int maxAlloc):
|
||||
_blockSize(blockSize),
|
||||
_maxAlloc(maxAlloc),
|
||||
_allocated(preAlloc)
|
||||
{
|
||||
poco_assert (maxAlloc == 0 || maxAlloc >= preAlloc);
|
||||
poco_assert (preAlloc >= 0 && maxAlloc >= 0);
|
||||
|
||||
int r = BLOCK_RESERVE;
|
||||
if (preAlloc > r)
|
||||
r = preAlloc;
|
||||
if (maxAlloc > 0 && maxAlloc < r)
|
||||
r = maxAlloc;
|
||||
_blocks.reserve(r);
|
||||
|
||||
try
|
||||
{
|
||||
for (int i = 0; i < preAlloc; ++i)
|
||||
{
|
||||
_blocks.push_back(new char[_blockSize]);
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
clear();
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
MemoryPool::~MemoryPool()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
|
||||
void MemoryPool::clear()
|
||||
{
|
||||
for (BlockVec::iterator it = _blocks.begin(); it != _blocks.end(); ++it)
|
||||
{
|
||||
delete [] *it;
|
||||
}
|
||||
_blocks.clear();
|
||||
}
|
||||
|
||||
|
||||
void* MemoryPool::get()
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
if (_blocks.empty())
|
||||
{
|
||||
if (_maxAlloc == 0 || _allocated < _maxAlloc)
|
||||
{
|
||||
++_allocated;
|
||||
return new char[_blockSize];
|
||||
}
|
||||
else throw OutOfMemoryException("MemoryPool exhausted");
|
||||
}
|
||||
else
|
||||
{
|
||||
char* ptr = _blocks.back();
|
||||
_blocks.pop_back();
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MemoryPool::release(void* ptr)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
try
|
||||
{
|
||||
_blocks.push_back(reinterpret_cast<char*>(ptr));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
delete [] reinterpret_cast<char*>(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // namespace Poco
|
@@ -19,7 +19,6 @@

 #include "Poco/BufferedStreamBuf.h"
-#include "Poco/Net/HTTPBufferAllocator.h"
 #include "Poco/Net/Net.h"

@@ -27,9 +26,9 @@ namespace Poco
 {
 namespace Net
 {
+    constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;

-    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>, HTTPBufferAllocator> HTTPBasicStreamBuf;
+    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>> HTTPBasicStreamBuf;

 }
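The dropped third template argument named a class with static allocate/deallocate hooks; a sketch of that contract, reconstructed from the HTTPBufferAllocator declarations removed just below (an assumption-labelled illustration, not Poco's actual default allocator):

```cpp
#include <ios>

// Hypothetical allocator with the same static interface the old typedef relied on.
struct PlainBufferAllocator
{
    static char * allocate(std::streamsize size) { return new char[static_cast<std::size_t>(size)]; }
    static void deallocate(char * ptr, std::streamsize /*size*/) { delete[] ptr; }
};

// With the third argument gone, HTTPBasicStreamBuf falls back to the library's default
// allocator and an 8 KiB buffer (HTTP_DEFAULT_BUFFER_SIZE) instead of pooled 128 KiB blocks.
```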
|
||||
|
@ -1,53 +0,0 @@
|
||||
//
|
||||
// HTTPBufferAllocator.h
|
||||
//
|
||||
// Library: Net
|
||||
// Package: HTTP
|
||||
// Module: HTTPBufferAllocator
|
||||
//
|
||||
// Definition of the HTTPBufferAllocator class.
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#ifndef Net_HTTPBufferAllocator_INCLUDED
|
||||
#define Net_HTTPBufferAllocator_INCLUDED
|
||||
|
||||
|
||||
#include <ios>
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Net/Net.h"
|
||||
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
namespace Net
|
||||
{
|
||||
|
||||
|
||||
class Net_API HTTPBufferAllocator
|
||||
/// A BufferAllocator for HTTP streams.
|
||||
{
|
||||
public:
|
||||
static char * allocate(std::streamsize size);
|
||||
static void deallocate(char * ptr, std::streamsize size);
|
||||
|
||||
enum
|
||||
{
|
||||
BUFFER_SIZE = 128 * 1024
|
||||
};
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
} // namespace Poco::Net
|
||||
|
||||
|
||||
#endif // Net_HTTPBufferAllocator_INCLUDED
|
@ -21,7 +21,6 @@
|
||||
#include <cstddef>
|
||||
#include <istream>
|
||||
#include <ostream>
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Net/HTTPBasicStreamBuf.h"
|
||||
#include "Poco/Net/Net.h"
|
||||
|
||||
@ -80,12 +79,6 @@ namespace Net
|
||||
public:
|
||||
HTTPChunkedInputStream(HTTPSession & session);
|
||||
~HTTPChunkedInputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
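Several stream classes in this diff lose the same pattern: class-scope operator new/delete backed by a static Poco::MemoryPool. A sketch of the removed pattern, reconstructed from the declarations above and the corresponding .cpp hunks further down (not verbatim source):

```cpp
#include <cstddef>
#include "Poco/MemoryPool.h"

class PooledObjectSketch
{
public:
    // Every instance is carved out of a fixed-size pool instead of the general heap.
    void * operator new(std::size_t /*size*/) { return _pool.get(); }

    void operator delete(void * ptr)
    {
        try { _pool.release(ptr); }
        catch (...) { /* the original called poco_unexpected() here */ }
    }

private:
    static Poco::MemoryPool _pool;
};

// One pool sized for exactly one object of this class, mirroring the removed
// `_pool(sizeof(HTTPChunkedInputStream))` initialisation.
Poco::MemoryPool PooledObjectSketch::_pool(sizeof(PooledObjectSketch));
```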
|
||||
|
||||
|
||||
@ -95,12 +88,6 @@ namespace Net
|
||||
public:
|
||||
HTTPChunkedOutputStream(HTTPSession & session);
|
||||
~HTTPChunkedOutputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
|
@ -78,12 +78,6 @@ namespace Net
|
||||
public:
|
||||
HTTPFixedLengthInputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
|
||||
~HTTPFixedLengthInputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
@ -93,12 +87,6 @@ namespace Net
|
||||
public:
|
||||
HTTPFixedLengthOutputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
|
||||
~HTTPFixedLengthOutputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
|
@ -21,7 +21,6 @@
|
||||
#include <cstddef>
|
||||
#include <istream>
|
||||
#include <ostream>
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Net/HTTPBasicStreamBuf.h"
|
||||
#include "Poco/Net/Net.h"
|
||||
|
||||
@ -74,12 +73,6 @@ namespace Net
|
||||
public:
|
||||
HTTPHeaderInputStream(HTTPSession & session);
|
||||
~HTTPHeaderInputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
@ -89,12 +82,6 @@ namespace Net
|
||||
public:
|
||||
HTTPHeaderOutputStream(HTTPSession & session);
|
||||
~HTTPHeaderOutputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
|
@@ -192,7 +192,7 @@ namespace Net
     HTTPSession & operator=(const HTTPSession &);

     StreamSocket _socket;
-    char * _pBuffer;
+    std::unique_ptr<char[]> _pBuffer;
     char * _pCurrent;
     char * _pEnd;
     bool _keepAlive;
|
||||
|
@ -21,7 +21,6 @@
|
||||
#include <cstddef>
|
||||
#include <istream>
|
||||
#include <ostream>
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Net/HTTPBasicStreamBuf.h"
|
||||
#include "Poco/Net/Net.h"
|
||||
|
||||
@ -75,12 +74,6 @@ namespace Net
|
||||
public:
|
||||
HTTPInputStream(HTTPSession & session);
|
||||
~HTTPInputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
@ -90,12 +83,6 @@ namespace Net
|
||||
public:
|
||||
HTTPOutputStream(HTTPSession & session);
|
||||
~HTTPOutputStream();
|
||||
|
||||
void * operator new(std::size_t size);
|
||||
void operator delete(void * ptr);
|
||||
|
||||
private:
|
||||
static Poco::MemoryPool _pool;
|
||||
};
|
||||
|
||||
|
||||
|
@ -1,44 +0,0 @@
|
||||
//
|
||||
// HTTPBufferAllocator.cpp
|
||||
//
|
||||
// Library: Net
|
||||
// Package: HTTP
|
||||
// Module: HTTPBufferAllocator
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#include "Poco/Net/HTTPBufferAllocator.h"
|
||||
|
||||
|
||||
using Poco::MemoryPool;
|
||||
|
||||
|
||||
namespace Poco {
|
||||
namespace Net {
|
||||
|
||||
|
||||
MemoryPool HTTPBufferAllocator::_pool(HTTPBufferAllocator::BUFFER_SIZE, 16);
|
||||
|
||||
|
||||
char* HTTPBufferAllocator::allocate(std::streamsize size)
|
||||
{
|
||||
poco_assert_dbg (size == BUFFER_SIZE);
|
||||
|
||||
return reinterpret_cast<char*>(_pool.get());
|
||||
}
|
||||
|
||||
|
||||
void HTTPBufferAllocator::deallocate(char* ptr, std::streamsize size)
|
||||
{
|
||||
poco_assert_dbg (size == BUFFER_SIZE);
|
||||
|
||||
_pool.release(ptr);
|
||||
}
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
@ -34,7 +34,7 @@ namespace Net {
|
||||
|
||||
|
||||
HTTPChunkedStreamBuf::HTTPChunkedStreamBuf(HTTPSession& session, openmode mode):
|
||||
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
|
||||
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
|
||||
_session(session),
|
||||
_mode(mode),
|
||||
_chunk(0)
|
||||
@ -181,10 +181,6 @@ HTTPChunkedStreamBuf* HTTPChunkedIOS::rdbuf()
|
||||
// HTTPChunkedInputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPChunkedInputStream::_pool(sizeof(HTTPChunkedInputStream));
|
||||
|
||||
|
||||
HTTPChunkedInputStream::HTTPChunkedInputStream(HTTPSession& session):
|
||||
HTTPChunkedIOS(session, std::ios::in),
|
||||
std::istream(&_buf)
|
||||
@ -196,34 +192,10 @@ HTTPChunkedInputStream::~HTTPChunkedInputStream()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void* HTTPChunkedInputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPChunkedInputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// HTTPChunkedOutputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPChunkedOutputStream::_pool(sizeof(HTTPChunkedOutputStream));
|
||||
|
||||
|
||||
HTTPChunkedOutputStream::HTTPChunkedOutputStream(HTTPSession& session):
|
||||
HTTPChunkedIOS(session, std::ios::out),
|
||||
std::ostream(&_buf)
|
||||
@ -235,24 +207,4 @@ HTTPChunkedOutputStream::~HTTPChunkedOutputStream()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void* HTTPChunkedOutputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPChunkedOutputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
||||
|
@ -30,7 +30,7 @@ namespace Net {
|
||||
|
||||
|
||||
HTTPFixedLengthStreamBuf::HTTPFixedLengthStreamBuf(HTTPSession& session, ContentLength length, openmode mode):
|
||||
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
|
||||
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
|
||||
_session(session),
|
||||
_length(length),
|
||||
_count(0)
|
||||
@ -109,9 +109,6 @@ HTTPFixedLengthStreamBuf* HTTPFixedLengthIOS::rdbuf()
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPFixedLengthInputStream::_pool(sizeof(HTTPFixedLengthInputStream));
|
||||
|
||||
|
||||
HTTPFixedLengthInputStream::HTTPFixedLengthInputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
|
||||
HTTPFixedLengthIOS(session, length, std::ios::in),
|
||||
std::istream(&_buf)
|
||||
@ -124,33 +121,10 @@ HTTPFixedLengthInputStream::~HTTPFixedLengthInputStream()
|
||||
}
|
||||
|
||||
|
||||
void* HTTPFixedLengthInputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPFixedLengthInputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// HTTPFixedLengthOutputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPFixedLengthOutputStream::_pool(sizeof(HTTPFixedLengthOutputStream));
|
||||
|
||||
|
||||
HTTPFixedLengthOutputStream::HTTPFixedLengthOutputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
|
||||
HTTPFixedLengthIOS(session, length, std::ios::out),
|
||||
std::ostream(&_buf)
|
||||
@ -163,23 +137,4 @@ HTTPFixedLengthOutputStream::~HTTPFixedLengthOutputStream()
|
||||
}
|
||||
|
||||
|
||||
void* HTTPFixedLengthOutputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPFixedLengthOutputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
||||
|
@ -26,7 +26,7 @@ namespace Net {
|
||||
|
||||
|
||||
HTTPHeaderStreamBuf::HTTPHeaderStreamBuf(HTTPSession& session, openmode mode):
|
||||
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
|
||||
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
|
||||
_session(session),
|
||||
_end(false)
|
||||
{
|
||||
@ -101,10 +101,6 @@ HTTPHeaderStreamBuf* HTTPHeaderIOS::rdbuf()
|
||||
// HTTPHeaderInputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPHeaderInputStream::_pool(sizeof(HTTPHeaderInputStream));
|
||||
|
||||
|
||||
HTTPHeaderInputStream::HTTPHeaderInputStream(HTTPSession& session):
|
||||
HTTPHeaderIOS(session, std::ios::in),
|
||||
std::istream(&_buf)
|
||||
@ -116,34 +112,10 @@ HTTPHeaderInputStream::~HTTPHeaderInputStream()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void* HTTPHeaderInputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPHeaderInputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// HTTPHeaderOutputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPHeaderOutputStream::_pool(sizeof(HTTPHeaderOutputStream));
|
||||
|
||||
|
||||
HTTPHeaderOutputStream::HTTPHeaderOutputStream(HTTPSession& session):
|
||||
HTTPHeaderIOS(session, std::ios::out),
|
||||
std::ostream(&_buf)
|
||||
@ -155,24 +127,4 @@ HTTPHeaderOutputStream::~HTTPHeaderOutputStream()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void* HTTPHeaderOutputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPHeaderOutputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
||||
|
@ -13,8 +13,8 @@
|
||||
|
||||
|
||||
#include "Poco/Net/HTTPSession.h"
|
||||
#include "Poco/Net/HTTPBufferAllocator.h"
|
||||
#include "Poco/Net/NetException.h"
|
||||
#include "Poco/Net/HTTPBasicStreamBuf.h"
|
||||
#include <cstring>
|
||||
|
||||
|
||||
@ -68,14 +68,6 @@ HTTPSession::HTTPSession(const StreamSocket& socket, bool keepAlive):
|
||||
|
||||
HTTPSession::~HTTPSession()
|
||||
{
|
||||
try
|
||||
{
|
||||
if (_pBuffer) HTTPBufferAllocator::deallocate(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
try
|
||||
{
|
||||
close();
|
||||
@@ -177,10 +169,10 @@ void HTTPSession::refill()
 {
     if (!_pBuffer)
     {
-        _pBuffer = HTTPBufferAllocator::allocate(HTTPBufferAllocator::BUFFER_SIZE);
+        _pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
     }
-    _pCurrent = _pEnd = _pBuffer;
-    int n = receive(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
+    _pCurrent = _pEnd = _pBuffer.get();
+    int n = receive(_pBuffer.get(), HTTP_DEFAULT_BUFFER_SIZE);
     _pEnd += n;
 }
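A minimal sketch of the ownership change this hunk makes, assuming only what the hunk itself shows (not the full Poco class): the session buffer is now freed automatically instead of being handed back to an allocator in the destructor.

```cpp
#include <cstddef>
#include <memory>

struct SessionBufferSketch
{
    std::unique_ptr<char[]> buffer;  // released automatically when the owning object is destroyed
    char * current = nullptr;
    char * end = nullptr;

    void refill(std::size_t size)
    {
        if (!buffer)
            buffer = std::make_unique<char[]>(size);
        current = end = buffer.get();
        // ... read into buffer.get() and advance `end`, as the real refill() does ...
    }
};
```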
|
||||
|
||||
@ -199,7 +191,7 @@ void HTTPSession::connect(const SocketAddress& address)
|
||||
_socket.setNoDelay(true);
|
||||
// There may be leftover data from a previous (failed) request in the buffer,
|
||||
// so we clear it.
|
||||
_pCurrent = _pEnd = _pBuffer;
|
||||
_pCurrent = _pEnd = _pBuffer.get();
|
||||
}
|
||||
|
||||
|
||||
|
@ -26,7 +26,7 @@ namespace Net {
|
||||
|
||||
|
||||
HTTPStreamBuf::HTTPStreamBuf(HTTPSession& session, openmode mode):
|
||||
HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
|
||||
HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
|
||||
_session(session),
|
||||
_mode(mode)
|
||||
{
|
||||
@ -96,10 +96,6 @@ HTTPStreamBuf* HTTPIOS::rdbuf()
|
||||
// HTTPInputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPInputStream::_pool(sizeof(HTTPInputStream));
|
||||
|
||||
|
||||
HTTPInputStream::HTTPInputStream(HTTPSession& session):
|
||||
HTTPIOS(session, std::ios::in),
|
||||
std::istream(&_buf)
|
||||
@ -112,33 +108,11 @@ HTTPInputStream::~HTTPInputStream()
|
||||
}
|
||||
|
||||
|
||||
void* HTTPInputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPInputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// HTTPOutputStream
|
||||
//
|
||||
|
||||
|
||||
Poco::MemoryPool HTTPOutputStream::_pool(sizeof(HTTPOutputStream));
|
||||
|
||||
|
||||
HTTPOutputStream::HTTPOutputStream(HTTPSession& session):
|
||||
HTTPIOS(session, std::ios::out),
|
||||
std::ostream(&_buf)
|
||||
@ -150,24 +124,4 @@ HTTPOutputStream::~HTTPOutputStream()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void* HTTPOutputStream::operator new(std::size_t size)
|
||||
{
|
||||
return _pool.get();
|
||||
}
|
||||
|
||||
|
||||
void HTTPOutputStream::operator delete(void* ptr)
|
||||
{
|
||||
try
|
||||
{
|
||||
_pool.release(ptr);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
||||
|
contrib/libhdfs3 (vendored)
@@ -1 +1 @@
-Subproject commit 164b89253fad7991bce77882f01b51ab81d19f3d
+Subproject commit 377220ef351ae24994a5fcd2b5fa3930d00c4db0
|
@@ -18,7 +18,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
     python3-pip \
     shellcheck \
     yamllint \
-    && pip3 install black==23.1.0 boto3 codespell==2.2.1 dohq-artifactory mypy PyGithub unidiff pylint==2.6.2 \
+    && pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
     && apt-get clean \
     && rm -rf /root/.cache/pip
|
||||
|
||||
|
docs/_description_templates/template-data-type.md (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
---
|
||||
toc_priority:
|
||||
toc_title:
|
||||
---
|
||||
|
||||
# data_type_name {#data_type-name}
|
||||
|
||||
Description.
|
||||
|
||||
**Parameters** (Optional)
|
||||
|
||||
- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
|
||||
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).
|
||||
|
||||
**Examples**
|
||||
|
||||
```sql
|
||||
|
||||
```
|
||||
|
||||
## Additional Info {#additional-info} (Optional)
|
||||
|
||||
The name of an additional section can be any, for example, **Usage**.
|
||||
|
||||
**See Also** (Optional)
|
||||
|
||||
- [link](#)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/data-types/<data-type-name>/) <!--hide-->
|
docs/_description_templates/template-engine.md (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
# EngineName {#enginename}
|
||||
|
||||
- What the Database/Table engine does.
|
||||
- Relations with other engines if they exist.
|
||||
|
||||
## Creating a Database {#creating-a-database}
|
||||
``` sql
|
||||
CREATE DATABASE ...
|
||||
```
|
||||
or
|
||||
|
||||
## Creating a Table {#creating-a-table}
|
||||
``` sql
|
||||
CREATE TABLE ...
|
||||
```
|
||||
|
||||
**Engine Parameters**
|
||||
|
||||
**Query Clauses** (for Table engines only)
|
||||
|
||||
## Virtual columns {#virtual-columns} (for Table engines only)
|
||||
|
||||
List and virtual columns with description, if they exist.
|
||||
|
||||
## Data Types Support {#data_types-support} (for Database engines only)
|
||||
|
||||
| EngineName | ClickHouse |
|
||||
|-----------------------|------------------------------------|
|
||||
| NativeDataTypeName | [ClickHouseDataTypeName](link#) |
|
||||
|
||||
|
||||
## Specifics and recommendations {#specifics-and-recommendations}
|
||||
|
||||
Algorithms
|
||||
Specifics of read and write processes
|
||||
Examples of tasks
|
||||
Recommendations for usage
|
||||
Specifics of data storage
|
||||
|
||||
## Usage Example {#usage-example}
|
||||
|
||||
The example must show usage and use cases. The following text contains the recommended parts of this section.
|
||||
|
||||
Input table:
|
||||
|
||||
``` text
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```
|
||||
|
||||
Follow up with any text to clarify the example.
|
||||
|
||||
**See Also**
|
||||
|
||||
- [link](#)
|
docs/_description_templates/template-function.md (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
## functionName {#functionname-in-lower-case}
|
||||
|
||||
Short description.
|
||||
|
||||
**Syntax** (without SELECT)
|
||||
|
||||
``` sql
|
||||
<function syntax>
|
||||
```
|
||||
|
||||
Alias: `<alias name>`. (Optional)
|
||||
|
||||
More text (Optional).
|
||||
|
||||
**Arguments** (Optional)
|
||||
|
||||
- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
|
||||
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>.Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
|
||||
|
||||
**Parameters** (Optional, only for parametric aggregate functions)
|
||||
|
||||
- `z` — Description. Optional (only for optional parameters). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
|
||||
|
||||
**Returned value(s)**
|
||||
|
||||
- Returned values list.
|
||||
|
||||
Type: [Type name](relative/path/to/type/dscr.md#type).
|
||||
|
||||
**Example**
|
||||
|
||||
The example must show usage and/or a use cases. The following text contains recommended parts of an example.
|
||||
|
||||
Input table (Optional):
|
||||
|
||||
``` text
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```
|
||||
|
||||
**See Also** (Optional)
|
||||
|
||||
- [link](#)
|
docs/_description_templates/template-server-setting.md (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
## server_setting_name {#server_setting_name}
|
||||
|
||||
Description.
|
||||
|
||||
Describe what is configured in this section of settings.
|
||||
|
||||
Possible value: ...
|
||||
|
||||
Default value: ...
|
||||
|
||||
**Settings** (Optional)
|
||||
|
||||
If the section contains several settings, list them here. Specify possible values and default values:
|
||||
|
||||
- setting_1 — Description.
|
||||
- setting_2 — Description.
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<server_setting_name>
|
||||
<setting_1> ... </setting_1>
|
||||
<setting_2> ... </setting_2>
|
||||
</server_setting_name>
|
||||
```
|
||||
|
||||
**Additional Info** (Optional)
|
||||
|
||||
The name of an additional section can be any, for example, **Usage**.
|
||||
|
||||
**See Also** (Optional)
|
||||
|
||||
- [link](#)
|
docs/_description_templates/template-setting.md (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
## setting_name {#setting_name}
|
||||
|
||||
Description.
|
||||
|
||||
For the switch setting, use the typical phrase: “Enables or disables something …”.
|
||||
|
||||
Possible values:
|
||||
|
||||
*For switcher setting:*
|
||||
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
*For another setting (typical phrases):*
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Disabled or unlimited or something else.
|
||||
|
||||
Default value: `value`.
|
||||
|
||||
**Additional Info** (Optional)
|
||||
|
||||
The name of an additional section can be any, for example, **Usage**.
|
||||
|
||||
**See Also** (Optional)
|
||||
|
||||
- [link](#)
|
docs/_description_templates/template-statement.md (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Statement name (for example, SHOW USER) {#statement-name-in-lower-case}
|
||||
|
||||
Brief description of what the statement does.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
Syntax of the statement.
|
||||
```
|
||||
|
||||
## Other necessary sections of the description (Optional) {#anchor}
|
||||
|
||||
Examples of descriptions with a complicated structure:
|
||||
|
||||
- https://clickhouse.com/docs/en/sql-reference/statements/grant/
|
||||
- https://clickhouse.com/docs/en/sql-reference/statements/revoke/
|
||||
- https://clickhouse.com/docs/en/sql-reference/statements/select/join/
|
||||
|
||||
|
||||
**See Also** (Optional)
|
||||
|
||||
Links to related topics as a list.
|
||||
|
||||
- [link](#)
|
docs/_description_templates/template-system-table.md (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
# system.table_name {#system-tables_table-name}
|
||||
|
||||
Description.
|
||||
|
||||
Columns:
|
||||
|
||||
- `column_name` ([data_type_name](path/to/data_type.md)) — Description.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.table_name
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
Some output. It shouldn't be too long.
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Article name](path/to/article_name.md) — Some words about referenced information.
|
@@ -378,6 +378,10 @@ request](https://github.com/ClickHouse/ClickHouse/commits/master) and find CI ch
 https://s3.amazonaws.com/clickhouse/builds/PRs/.../.../binary_aarch64_v80compat/clickhouse". You can then click the link to download the
 build.

+### macOS-only: Install with Homebrew
+
+To install ClickHouse using the popular `brew` package manager, follow the instructions listed in the [ClickHouse Homebrew tap](https://github.com/ClickHouse/homebrew-clickhouse).
+
 ## Launch {#launch}

 To start the server as a daemon, run:
|
||||
|
@@ -13,6 +13,7 @@ System tables provide information about:

 - Server states, processes, and environment.
 - Server’s internal processes.
+- Options used when the ClickHouse binary was built.

 System tables:
|
||||
|
||||
|
@@ -283,7 +283,7 @@ The optional keyword `EXTENDED` currently has no effect, it only exists for MySQ
 `SHOW INDEX` produces a result table with the following structure:
 - table - The name of the table (String)
-- non_unique - 0 if the index can contain duplicates, 1 otherwise (UInt8)
+- non_unique - 0 if the index cannot contain duplicates, 1 otherwise (UInt8)
 - key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
 - seq_in_index - Currently unused
 - column_name - Currently unused
|
||||
|
rust/skim/Cargo.lock (generated, 360 changed lines)
@@ -14,13 +14,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.20"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
|
||||
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "android-tzdata"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
|
||||
|
||||
[[package]]
|
||||
name = "android_system_properties"
|
||||
version = "0.1.5"
|
||||
@ -32,9 +38,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrayvec"
|
||||
version = "0.7.2"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
|
||||
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
@ -42,7 +48,7 @@ version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"hermit-abi 0.1.19",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
@ -67,15 +73,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bumpalo"
|
||||
version = "3.11.1"
|
||||
version = "3.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
|
||||
checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.77"
|
||||
version = "1.0.79"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
|
||||
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
@ -85,13 +91,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.23"
|
||||
version = "0.4.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
|
||||
checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5"
|
||||
dependencies = [
|
||||
"android-tzdata",
|
||||
"iana-time-zone",
|
||||
"js-sys",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"time 0.1.45",
|
||||
"wasm-bindgen",
|
||||
@ -100,9 +106,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "3.2.23"
|
||||
version = "3.2.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
|
||||
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"bitflags",
|
||||
@ -135,9 +141,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation-sys"
|
||||
version = "0.8.3"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
|
||||
checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
@ -155,9 +161,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.6"
|
||||
version = "0.5.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
|
||||
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"crossbeam-utils",
|
||||
@ -165,9 +171,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.2"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
|
||||
checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"crossbeam-epoch",
|
||||
@ -176,14 +182,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.9.13"
|
||||
version = "0.9.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
|
||||
checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if",
|
||||
"crossbeam-utils",
|
||||
"memoffset 0.7.1",
|
||||
"memoffset 0.9.0",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
@ -199,18 +205,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.8.14"
|
||||
version = "0.8.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
|
||||
checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cxx"
|
||||
version = "1.0.83"
|
||||
version = "1.0.97"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf"
|
||||
checksum = "e88abab2f5abbe4c56e8f1fb431b784d710b709888f35755a160e62e33fe38e8"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cxxbridge-flags",
|
||||
@ -220,9 +226,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cxx-build"
|
||||
version = "1.0.83"
|
||||
version = "1.0.97"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39"
|
||||
checksum = "5c0c11acd0e63bae27dcd2afced407063312771212b7a823b4fd72d633be30fb"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"codespan-reporting",
|
||||
@ -230,31 +236,31 @@ dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"scratch",
|
||||
"syn",
|
||||
"syn 2.0.23",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cxxbridge-flags"
|
||||
version = "1.0.83"
|
||||
version = "1.0.97"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12"
|
||||
checksum = "8d3816ed957c008ccd4728485511e3d9aaf7db419aa321e3d2c5a2f3411e36c8"
|
||||
|
||||
[[package]]
|
||||
name = "cxxbridge-macro"
|
||||
version = "1.0.83"
|
||||
version = "1.0.97"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6"
|
||||
checksum = "a26acccf6f445af85ea056362561a24ef56cdc15fcc685f03aec50b9c702cb6d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 2.0.23",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.14.2"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
|
||||
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_macro",
|
||||
@ -262,27 +268,27 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "darling_core"
|
||||
version = "0.14.2"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
|
||||
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
|
||||
dependencies = [
|
||||
"fnv",
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"strsim",
|
||||
"syn",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.14.2"
|
||||
version = "0.14.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
|
||||
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -313,7 +319,7 @@ dependencies = [
|
||||
"darling",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -323,7 +329,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68"
|
||||
dependencies = [
|
||||
"derive_builder_core",
|
||||
"syn",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -349,9 +355,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.8.0"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
|
||||
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
@ -383,9 +389,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.2.8"
|
||||
version = "0.2.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
|
||||
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
@ -407,6 +413,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
|
||||
|
||||
[[package]]
|
||||
name = "humantime"
|
||||
version = "2.1.0"
|
||||
@ -415,26 +427,25 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
|
||||
|
||||
[[package]]
|
||||
name = "iana-time-zone"
|
||||
version = "0.1.53"
|
||||
version = "0.1.57"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
|
||||
checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
|
||||
dependencies = [
|
||||
"android_system_properties",
|
||||
"core-foundation-sys",
|
||||
"iana-time-zone-haiku",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
"winapi",
|
||||
"windows",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "iana-time-zone-haiku"
|
||||
version = "0.1.1"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
|
||||
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
|
||||
dependencies = [
|
||||
"cxx",
|
||||
"cxx-build",
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -445,9 +456,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "1.9.2"
|
||||
version = "1.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
|
||||
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"hashbrown",
|
||||
@ -455,9 +466,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.60"
|
||||
version = "0.3.64"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
|
||||
checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
|
||||
dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
@ -470,27 +481,24 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.138"
|
||||
version = "0.2.147"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
|
||||
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
|
||||
|
||||
[[package]]
|
||||
name = "link-cplusplus"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
|
||||
checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.17"
|
||||
version = "0.4.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
@ -509,9 +517,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "memoffset"
|
||||
version = "0.7.1"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
|
||||
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
@ -541,16 +549,6 @@ dependencies = [
|
||||
"pin-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.45"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.15"
|
||||
@ -562,25 +560,25 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.14.0"
|
||||
version = "1.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
|
||||
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"hermit-abi 0.3.1",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.16.0"
|
||||
version = "1.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
|
||||
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
|
||||
|
||||
[[package]]
|
||||
name = "os_str_bytes"
|
||||
version = "6.4.1"
|
||||
version = "6.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
|
||||
checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
@ -590,27 +588,27 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.47"
|
||||
version = "1.0.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
|
||||
checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.21"
|
||||
version = "1.0.29"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
|
||||
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon"
|
||||
version = "1.6.1"
|
||||
version = "1.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
|
||||
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
|
||||
dependencies = [
|
||||
"either",
|
||||
"rayon-core",
|
||||
@ -618,9 +616,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rayon-core"
|
||||
version = "1.10.1"
|
||||
version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
|
||||
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
@ -650,9 +648,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.7.0"
|
||||
version = "1.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
|
||||
checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
@ -661,15 +659,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.28"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
|
||||
checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
|
||||
|
||||
[[package]]
|
||||
name = "rustversion"
|
||||
version = "1.0.9"
|
||||
version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
|
||||
checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
@ -679,15 +677,15 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
|
||||
[[package]]
|
||||
name = "scratch"
|
||||
version = "1.0.2"
|
||||
version = "1.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
|
||||
checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.149"
|
||||
version = "1.0.164"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "256b9932320c590e707b94576e3cc1f7c9024d0ee6612dfbcf1cb106cbe8e055"
|
||||
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
|
||||
|
||||
[[package]]
|
||||
name = "shlex"
|
||||
@ -697,9 +695,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
|
||||
|
||||
[[package]]
|
||||
name = "skim"
|
||||
version = "0.10.2"
|
||||
version = "0.10.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cebed5f897cd6c0d80fbe30adb36c0abf7400e93043a63ae56458495642b3485"
|
||||
checksum = "e5d28de0a6cb2cdd83a076f1de9d965b973ae08b244df1aa70b432946dda0f32"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"beef",
|
||||
@ -717,7 +715,7 @@ dependencies = [
|
||||
"rayon",
|
||||
"regex",
|
||||
"shlex",
|
||||
"time 0.3.17",
|
||||
"time 0.3.22",
|
||||
"timer",
|
||||
"tuikit",
|
||||
"unicode-width",
|
||||
@ -732,9 +730,20 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.105"
|
||||
version = "1.0.109"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908"
|
||||
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -754,9 +763,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.1.3"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
|
||||
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
@ -769,30 +778,31 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.37"
|
||||
version = "1.0.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e"
|
||||
checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac"
|
||||
dependencies = [
|
||||
"thiserror-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "1.0.37"
|
||||
version = "1.0.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb"
|
||||
checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 2.0.23",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "1.1.4"
|
||||
version = "1.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
|
||||
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
@ -809,9 +819,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.3.17"
|
||||
version = "0.3.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
|
||||
checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"time-core",
|
||||
@ -819,9 +829,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time-core"
|
||||
version = "0.1.0"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
|
||||
checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
|
||||
|
||||
[[package]]
|
||||
name = "timer"
|
||||
@ -848,9 +858,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.5"
|
||||
version = "1.0.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
|
||||
checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
@ -860,15 +870,15 @@ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
|
||||
|
||||
[[package]]
|
||||
name = "utf8parse"
|
||||
version = "0.2.0"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372"
|
||||
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
|
||||
|
||||
[[package]]
|
||||
name = "vte"
|
||||
version = "0.11.0"
|
||||
version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1aae21c12ad2ec2d168c236f369c38ff332bc1134f7246350dca641437365045"
|
||||
checksum = "f5022b5fbf9407086c180e9557be968742d839e68346af7792b8592489732197"
|
||||
dependencies = [
|
||||
"arrayvec",
|
||||
"utf8parse",
|
||||
@ -899,9 +909,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.83"
|
||||
version = "0.2.87"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
|
||||
checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"wasm-bindgen-macro",
|
||||
@ -909,24 +919,24 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-backend"
|
||||
version = "0.2.83"
|
||||
version = "0.2.87"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
|
||||
checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"log",
|
||||
"once_cell",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 2.0.23",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.83"
|
||||
version = "0.2.87"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
|
||||
checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
@ -934,22 +944,22 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.83"
|
||||
version = "0.2.87"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
|
||||
checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 2.0.23",
|
||||
"wasm-bindgen-backend",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.83"
|
||||
version = "0.2.87"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
|
||||
checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
@ -981,3 +991,69 @@ name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
|
||||
|
@@ -228,6 +228,12 @@ ContextAccess::ContextAccess(const AccessControl & access_control_, const Params
}


ContextAccess::ContextAccess(FullAccess)
    : is_full_access(true), access(std::make_shared<AccessRights>(AccessRights::getFullAccess())), access_with_implicit(access)
{
}


ContextAccess::~ContextAccess()
{
    enabled_settings.reset();

@@ -413,14 +419,8 @@ std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const

std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
{
    static const std::shared_ptr<const ContextAccess> res = []
    {
        auto full_access = std::make_shared<ContextAccess>();
        full_access->is_full_access = true;
        full_access->access = std::make_shared<AccessRights>(AccessRights::getFullAccess());
        full_access->access_with_implicit = full_access->access;
        return full_access;
    }();
    static const std::shared_ptr<const ContextAccess> res =
        [] { return std::shared_ptr<ContextAccess>(new ContextAccess{kFullAccess}); }();
    return res;
}
@@ -69,7 +69,6 @@ public:
    using Params = ContextAccessParams;
    const Params & getParams() const { return params; }

    ContextAccess() { } /// NOLINT
    ContextAccess(const AccessControl & access_control_, const Params & params_);

    /// Returns the current user. Throws if user is nullptr.

@@ -171,10 +170,17 @@ public:
private:
    friend class AccessControl;

    struct FullAccess {};
    static const FullAccess kFullAccess;

    /// Makes an instance of ContextAccess which provides full access to everything
    /// without any limitations. This is used for the global context.
    explicit ContextAccess(FullAccess);

    void initialize();
    void setUser(const UserPtr & user_) const;
    void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const;
    void calculateAccessRights() const;
    void setUser(const UserPtr & user_) const TSA_REQUIRES(mutex);
    void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const TSA_REQUIRES(mutex);
    void calculateAccessRights() const TSA_REQUIRES(mutex);

    template <bool throw_if_denied, bool grant_option>
    bool checkAccessImpl(const AccessFlags & flags) const;

@@ -217,20 +223,23 @@ private:

    const AccessControl * access_control = nullptr;
    const Params params;
    bool is_full_access = false;
    mutable Poco::Logger * trace_log = nullptr;
    mutable UserPtr user;
    mutable String user_name;
    mutable bool user_was_dropped = false;
    mutable scope_guard subscription_for_user_change;
    mutable std::shared_ptr<const EnabledRoles> enabled_roles;
    mutable scope_guard subscription_for_roles_changes;
    mutable std::shared_ptr<const EnabledRolesInfo> roles_info;
    mutable std::shared_ptr<const AccessRights> access;
    mutable std::shared_ptr<const AccessRights> access_with_implicit;
    mutable std::shared_ptr<const EnabledRowPolicies> enabled_row_policies;
    mutable std::shared_ptr<const EnabledQuota> enabled_quota;
    mutable std::shared_ptr<const EnabledSettings> enabled_settings;
    const bool is_full_access = false;

    mutable std::atomic<bool> user_was_dropped = false;
    mutable std::atomic<Poco::Logger *> trace_log = nullptr;

    mutable UserPtr user TSA_GUARDED_BY(mutex);
    mutable String user_name TSA_GUARDED_BY(mutex);
    mutable scope_guard subscription_for_user_change TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const EnabledRoles> enabled_roles TSA_GUARDED_BY(mutex);
    mutable scope_guard subscription_for_roles_changes TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const EnabledRolesInfo> roles_info TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const AccessRights> access TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const AccessRights> access_with_implicit TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const EnabledRowPolicies> enabled_row_policies TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const EnabledQuota> enabled_quota TSA_GUARDED_BY(mutex);
    mutable std::shared_ptr<const EnabledSettings> enabled_settings TSA_GUARDED_BY(mutex);

    mutable std::mutex mutex;
};
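For readers not familiar with the pattern introduced above: a private tag type (here FullAccess / kFullAccess) combined with a function-local static is the usual way to build a lazily initialized, thread-safe, process-wide singleton while keeping the special constructor unreachable from outside the class. A minimal standalone sketch of the same idea, with hypothetical names rather than the ClickHouse classes:

```cpp
#include <memory>

class Widget
{
public:
    static std::shared_ptr<const Widget> getGlobal()
    {
        /// Initialization of a function-local static is thread-safe since C++11,
        /// so the instance is created exactly once, on first use.
        /// std::make_shared cannot be used here because the constructor is private,
        /// hence the explicit `new`, as in the diff above.
        static const std::shared_ptr<const Widget> instance =
            [] { return std::shared_ptr<Widget>(new Widget{Tag{}}); }();
        return instance;
    }

private:
    struct Tag {};              /// private tag type: only members can name it,
    explicit Widget(Tag) {}     /// so only getGlobal() can construct the object
};
```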
@@ -188,6 +188,7 @@
    M(CacheDetachedFileSegments, "Number of existing detached cache file segments") \
    M(FilesystemCacheSize, "Filesystem cache size in bytes") \
    M(FilesystemCacheElements, "Filesystem cache elements (file segments)") \
    M(FilesystemCacheDownloadQueueElements, "Filesystem cache elements in download queue") \
    M(AsyncInsertCacheSize, "Number of async insert hash id in cache") \
    M(S3Requests, "S3 requests") \
    M(KeeperAliveConnections, "Number of alive connections") \
@@ -67,8 +67,8 @@ ThreadGroup::ThreadGroup()
    : master_thread_id(CurrentThread::get().thread_id)
{}

ThreadStatus::ThreadStatus()
    : thread_id{getThreadId()}
ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
    : thread_id{getThreadId()}, check_current_thread_on_destruction(check_current_thread_on_destruction_)
{
    last_rusage = std::make_unique<RUsageCounters>();

@@ -201,8 +201,11 @@ ThreadStatus::~ThreadStatus()

    /// Only change current_thread if it's currently being used by this ThreadStatus
    /// For example, PushingToViews chain creates and deletes ThreadStatus instances while running in the main query thread
    if (current_thread == this)
    if (check_current_thread_on_destruction)
    {
        assert(current_thread == this);
        current_thread = nullptr;
    }
}

void ThreadStatus::updatePerformanceCounters()
@@ -224,8 +224,10 @@ private:

    Poco::Logger * log = nullptr;

    bool check_current_thread_on_destruction;

public:
    ThreadStatus();
    explicit ThreadStatus(bool check_current_thread_on_destruction_ = true);
    ~ThreadStatus();

    ThreadGroupPtr getThreadGroup() const;
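The new constructor parameter lets callers create auxiliary ThreadStatus objects whose destruction does not have to find them installed as the thread's current status; only the default (true) case keeps the assertion. A rough, simplified illustration of that intent, not the real class:

```cpp
#include <cassert>

struct Status;
thread_local Status * current_status = nullptr;   /// stand-in for `current_thread`

struct Status
{
    explicit Status(bool check_on_destruction_ = true)
        : check_on_destruction(check_on_destruction_) {}

    ~Status()
    {
        /// Only the "real" per-thread status is expected to still be installed here;
        /// auxiliary instances pass false and skip the check and the reset entirely.
        if (check_on_destruction)
        {
            assert(current_status == this);
            current_status = nullptr;
        }
    }

    bool check_on_destruction;
};
```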
@@ -37,7 +37,7 @@ SipHash getHashOfLoadedBinary()
std::string getHashOfLoadedBinaryHex()
{
    SipHash hash = getHashOfLoadedBinary();
    std::array<UInt64, 2> checksum;
    UInt128 checksum;
    hash.get128(checksum);
    return getHexUIntUppercase(checksum);
}
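The change above replaces the two-element UInt64 array with a single UInt128, so the hex helper can format the whole 128-bit value at once. As a general illustration of what such a helper does, here is a plain standard-C++ sketch of hex-encoding a 128-bit value given as two 64-bit halves (this is not the ClickHouse implementation, just the underlying arithmetic):

```cpp
#include <cstdint>
#include <string>

/// Hex-encode a 128-bit value, most significant half first (32 uppercase hex digits).
std::string hexUInt128(uint64_t high, uint64_t low)
{
    static const char * digits = "0123456789ABCDEF";
    std::string out(32, '0');
    for (int i = 15; i >= 0; --i)
    {
        out[i]      = digits[(high >> ((15 - i) * 4)) & 0xF];   /// high half -> chars 0..15
        out[16 + i] = digits[(low  >> ((15 - i) * 4)) & 0xF];   /// low half  -> chars 16..31
    }
    return out;
}
```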
@@ -49,8 +49,8 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c

    /// TODO mess up of endianness in error message.
    message << "Checksum doesn't match: corrupted data."
        " Reference: " + getHexUIntLowercase(expected_checksum.high64) + getHexUIntLowercase(expected_checksum.low64)
        + ". Actual: " + getHexUIntLowercase(calculated_checksum.high64) + getHexUIntLowercase(calculated_checksum.low64)
        " Reference: " + getHexUIntLowercase(expected_checksum)
        + ". Actual: " + getHexUIntLowercase(calculated_checksum)
        + ". Size of compressed block: " + toString(size);

    const char * message_hardware_failure = "This is most likely due to hardware failure. "
@@ -1,10 +1,11 @@
#include <base/defines.h>
#include <Core/SettingsQuirks.h>
#include <Core/Settings.h>
#include <Poco/Environment.h>
#include <Poco/Platform.h>
#include <Common/VersionNumber.h>
#include <Common/logger_useful.h>
#include <cstdlib>


namespace
{

@@ -71,6 +72,12 @@ void applySettingsQuirks(Settings & settings, Poco::Logger * log)
        }
    }

#if defined(THREAD_SANITIZER)
    settings.use_hedged_requests.value = false;
    if (log)
        LOG_WARNING(log, "use_hedged_requests has been disabled for the build with Thread Sanitizer, because they are using fibers, leading to a failed assertion inside TSan");
#endif

    if (!queryProfilerWorks())
    {
        if (settings.query_profiler_real_time_period_ns)
@@ -74,19 +74,22 @@ CachedOnDiskReadBufferFromFile::CachedOnDiskReadBufferFromFile(
}

void CachedOnDiskReadBufferFromFile::appendFilesystemCacheLog(
    const FileSegment::Range & file_segment_range, CachedOnDiskReadBufferFromFile::ReadType type)
    const FileSegment & file_segment, CachedOnDiskReadBufferFromFile::ReadType type)
{
    if (!cache_log)
        return;

    const auto range = file_segment.range();
    FilesystemCacheLogElement elem
    {
        .event_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()),
        .query_id = query_id,
        .source_file_path = source_file_path,
        .file_segment_range = { file_segment_range.left, file_segment_range.right },
        .file_segment_range = { range.left, range.right },
        .requested_range = { first_offset, read_until_position },
        .file_segment_size = file_segment_range.size(),
        .file_segment_key = file_segment.key().toString(),
        .file_segment_offset = file_segment.offset(),
        .file_segment_size = range.size(),
        .read_from_cache_attempted = true,
        .read_buffer_id = current_buffer_id,
        .profile_counters = std::make_shared<ProfileEvents::Counters::Snapshot>(

@@ -495,7 +498,7 @@ bool CachedOnDiskReadBufferFromFile::completeFileSegmentAndGetNext()
    auto completed_range = current_file_segment->range();

    if (cache_log)
        appendFilesystemCacheLog(completed_range, read_type);
        appendFilesystemCacheLog(*current_file_segment, read_type);

    chassert(file_offset_of_buffer_end > completed_range.right);

@@ -518,7 +521,7 @@ CachedOnDiskReadBufferFromFile::~CachedOnDiskReadBufferFromFile()
{
    if (cache_log && file_segments && !file_segments->empty())
    {
        appendFilesystemCacheLog(file_segments->front().range(), read_type);
        appendFilesystemCacheLog(file_segments->front(), read_type);
    }
}
@@ -90,7 +90,7 @@ private:

    bool completeFileSegmentAndGetNext();

    void appendFilesystemCacheLog(const FileSegment::Range & file_segment_range, ReadType read_type);
    void appendFilesystemCacheLog(const FileSegment & file_segment, ReadType read_type);

    bool writeCache(char * data, size_t size, size_t offset, FileSegment & file_segment);
@@ -90,6 +90,8 @@ void ReadBufferFromRemoteFSGather::appendUncachedReadInfo()
        .source_file_path = current_object.remote_path,
        .file_segment_range = { 0, current_object.bytes_size },
        .cache_type = FilesystemCacheLogElement::CacheType::READ_FROM_FS_BYPASSING_CACHE,
        .file_segment_key = {},
        .file_segment_offset = {},
        .file_segment_size = current_object.bytes_size,
        .read_from_cache_attempted = false,
    };
@@ -11,6 +11,7 @@
#include <base/hex.h>
#include <pcg-random/pcg_random.hpp>
#include <Common/randomSeed.h>
#include <Common/ThreadPool.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>

#include <filesystem>

@@ -54,9 +55,10 @@ FileCache::FileCache(const FileCacheSettings & settings)
    : max_file_segment_size(settings.max_file_segment_size)
    , bypass_cache_threshold(settings.enable_bypass_cache_with_threashold ? settings.bypass_cache_threashold : 0)
    , delayed_cleanup_interval_ms(settings.delayed_cleanup_interval_ms)
    , boundary_alignment(settings.boundary_alignment)
    , background_download_threads(settings.background_download_threads)
    , log(&Poco::Logger::get("FileCache"))
    , metadata(settings.base_path)
    , boundary_alignment(settings.boundary_alignment)
{
    main_priority = std::make_unique<LRUFileCachePriority>(settings.max_size, settings.max_elements);

@@ -129,6 +131,9 @@ void FileCache::initialize()

    is_initialized = true;

    for (size_t i = 0; i < background_download_threads; ++i)
        download_threads.emplace_back([this] { metadata.downloadThreadFunc(); });

    cleanup_task = Context::getGlobalContextInstance()->getSchedulePool().createTask("FileCacheCleanup", [this]{ cleanupThreadFunc(); });
    cleanup_task->activate();
    cleanup_task->scheduleAfter(delayed_cleanup_interval_ms);

@@ -423,7 +428,12 @@ FileSegmentsHolderPtr FileCache::set(
}

FileSegmentsHolderPtr
FileCache::getOrSet(const Key & key, size_t offset, size_t size, size_t file_size, const CreateFileSegmentSettings & settings)
FileCache::getOrSet(
    const Key & key,
    size_t offset,
    size_t size,
    size_t file_size,
    const CreateFileSegmentSettings & settings)
{
    ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::FilesystemCacheGetOrSetMicroseconds);

@@ -645,27 +655,14 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size)

        if (segment_metadata->releasable())
        {
            auto segment = segment_metadata->file_segment;
            if (segment->state() == FileSegment::State::DOWNLOADED)
            {
                const auto & key = segment->key();
            const auto & key = segment_metadata->file_segment->key();
            auto it = to_delete.find(key);
            if (it == to_delete.end())
                it = to_delete.emplace(key, locked_key.getKeyMetadata()).first;
            it->second.add(segment_metadata);

                auto it = to_delete.find(key);
                if (it == to_delete.end())
                    it = to_delete.emplace(key, locked_key.getKeyMetadata()).first;
                it->second.add(segment_metadata);

                freeable_space += segment_metadata->size();
                ++freeable_count;

                return PriorityIterationResult::CONTINUE;
            }

            ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedFileSegments);
            ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedBytes, segment->getDownloadedSize(false));

            locked_key.removeFileSegment(segment->offset(), segment->lock());
            return PriorityIterationResult::REMOVE_AND_CONTINUE;
            freeable_space += segment_metadata->size();
            ++freeable_count;
        }
        return PriorityIterationResult::CONTINUE;
    };

@@ -809,6 +806,13 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size)
    return true;
}

void FileCache::removeKey(const Key & key)
{
    assertInitialized();
    auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW);
    locked_key->removeAll();
}

void FileCache::removeKeyIfExists(const Key & key)
{
    assertInitialized();

@@ -821,7 +825,14 @@ void FileCache::removeKeyIfExists(const Key & key)
    /// But if we have multiple replicated zero-copy tables on the same server
    /// it became possible to start removing something from cache when it is used
    /// by other "zero-copy" tables. That is why it's not an error.
    locked_key->removeAllReleasable();
    locked_key->removeAll(/* if_releasable */true);
}

void FileCache::removeFileSegment(const Key & key, size_t offset)
{
    assertInitialized();
    auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW);
    locked_key->removeFileSegment(offset);
}

void FileCache::removePathIfExists(const String & path)

@@ -833,22 +844,12 @@ void FileCache::removeAllReleasable()
{
    assertInitialized();

    auto lock = lockCache();

    main_priority->iterate([&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata)
    {
        if (segment_metadata->releasable())
        {
            auto file_segment = segment_metadata->file_segment;
            locked_key.removeFileSegment(file_segment->offset(), file_segment->lock());
            return PriorityIterationResult::REMOVE_AND_CONTINUE;
        }
        return PriorityIterationResult::CONTINUE;
    }, lock);
    metadata.iterate([](LockedKey & locked_key) { locked_key.removeAll(/* if_releasable */true); });

    if (stash)
    {
        /// Remove all access information.
        auto lock = lockCache();
        stash->records.clear();
        stash->queue->removeAll(lock);
    }

@@ -918,7 +919,7 @@ void FileCache::loadMetadata()
            continue;
        }

        const auto key = Key(unhexUInt<UInt128>(key_directory.filename().string().data()));
        const auto key = Key::fromKeyString(key_directory.filename().string());
        auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::CREATE_EMPTY, /* is_initial_load */true);

        for (fs::directory_iterator offset_it{key_directory}; offset_it != fs::directory_iterator(); ++offset_it)

@@ -1027,6 +1028,11 @@ void FileCache::deactivateBackgroundOperations()
{
    if (cleanup_task)
        cleanup_task->deactivate();

    metadata.cancelDownload();
    for (auto & thread : download_threads)
        if (thread.joinable())
            thread.join();
}

void FileCache::cleanup()

@@ -1038,10 +1044,6 @@ void FileCache::cleanupThreadFunc()
{
    try
    {
#ifdef ABORT_ON_LOGICAL_ERROR
        assertCacheCorrectness();
#endif

        cleanup();
    }
    catch (...)

@@ -1072,7 +1074,7 @@ FileSegmentsHolderPtr FileCache::getSnapshot()
FileSegmentsHolderPtr FileCache::getSnapshot(const Key & key)
{
    FileSegments file_segments;
    auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW);
    auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW_LOGICAL);
    for (const auto & [_, file_segment_metadata] : *locked_key->getKeyMetadata())
        file_segments.push_back(FileSegment::getSnapshot(file_segment_metadata->file_segment));
    return std::make_unique<FileSegmentsHolder>(std::move(file_segments));
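The initialize()/deactivateBackgroundOperations() changes above follow a common start/stop pattern for a small pool of background workers: spawn N threads that block on a shared queue, and on shutdown cancel the queue first so that every worker wakes up and returns before join(). A condensed sketch of that lifecycle, assuming a Queue type with an Item alias, a blocking pop() that returns false once cancelled, and a cancel() method; the names are illustrative and not the actual ClickHouse types:

```cpp
#include <functional>
#include <thread>
#include <vector>

template <typename Queue>
class BackgroundWorkers
{
public:
    BackgroundWorkers(Queue & queue_, size_t num_threads, std::function<void(typename Queue::Item &)> handler)
        : queue(queue_)
    {
        for (size_t i = 0; i < num_threads; ++i)
            threads.emplace_back([this, handler]
            {
                typename Queue::Item item;
                while (queue.pop(item))   /// blocks; returns false after cancel()
                    handler(item);
            });
    }

    ~BackgroundWorkers()
    {
        queue.cancel();                   /// wake every blocked worker
        for (auto & thread : threads)
            if (thread.joinable())
                thread.join();
    }

private:
    Queue & queue;
    std::vector<std::thread> threads;
};
```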
@@ -83,13 +83,19 @@ public:

    FileSegmentsHolderPtr set(const Key & key, size_t offset, size_t size, const CreateFileSegmentSettings & settings);

    /// Remove files by `key`. Removes files which might be used at the moment.
    /// Remove file segment by `key` and `offset`. Throws if file segment does not exist.
    void removeFileSegment(const Key & key, size_t offset);

    /// Remove files by `key`. Throws if key does not exist.
    void removeKey(const Key & key);

    /// Remove files by `key`.
    void removeKeyIfExists(const Key & key);

    /// Removes files by `path`. Removes files which might be used at the moment.
    /// Removes files by `path`.
    void removePathIfExists(const String & path);

    /// Remove files by `key`. Will not remove files which are used at the moment.
    /// Remove files by `key`.
    void removeAllReleasable();

    std::vector<String> tryGetCachePaths(const Key & key);

@@ -136,6 +142,8 @@ private:
    const size_t max_file_segment_size;
    const size_t bypass_cache_threshold = 0;
    const size_t delayed_cleanup_interval_ms;
    const size_t boundary_alignment;
    const size_t background_download_threads;

    Poco::Logger * log;

@@ -180,9 +188,9 @@ private:
     */
    BackgroundSchedulePool::TaskHolder cleanup_task;

    void assertInitialized() const;
    std::vector<ThreadFromGlobalPool> download_threads;

    size_t boundary_alignment;
    void assertInitialized() const;

    void assertCacheCorrectness();
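To summarise the removal API after this change, per the comments above: removeFileSegment() and removeKey() throw when the target is not in the cache, removeKeyIfExists() and removePathIfExists() are silent no-ops for missing entries, and removeAllReleasable() only drops segments that are not currently in use. A hedged usage sketch, assuming FileCache::Key is the key alias used in the header:

```cpp
/// Sketch only: `cache`, `key` and `offset` are assumed to exist in the caller's scope.
void dropFromCache(FileCache & cache, const FileCache::Key & key, size_t offset)
{
    cache.removeFileSegment(key, offset);   /// throws if the segment does not exist
    cache.removeKey(key);                   /// throws if the key does not exist
    cache.removeKeyIfExists(key);           /// silent if the key is already gone
    cache.removeAllReleasable();            /// skips segments still pinned by readers
}
```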
@@ -28,4 +28,9 @@ FileCacheKey FileCacheKey::random()
    return FileCacheKey(UUIDHelpers::generateV4().toUnderType());
}

FileCacheKey FileCacheKey::fromKeyString(const std::string & key_str)
{
    return FileCacheKey(unhexUInt<UInt128>(key_str.data()));
}

}
@@ -21,6 +21,8 @@ struct FileCacheKey
    static FileCacheKey random();

    bool operator==(const FileCacheKey & other) const { return key == other.key; }

    static FileCacheKey fromKeyString(const std::string & key_str);
};

using FileCacheKeyAndOffset = std::pair<FileCacheKey, size_t>;
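fromKeyString() is the inverse of the hex representation used for per-key cache directories; the loadMetadata() change earlier uses it to parse a directory name back into a key. A small sketch of the round trip, assuming the key's toString() (seen above in the cache-log change) returns that hex form:

```cpp
#include <cassert>
#include <string>

/// Sketch: serialise a cache key to its hex string and parse it back.
void keyRoundTripExample()
{
    FileCacheKey key = FileCacheKey::random();
    std::string as_text = key.toString();                     /// hex form, also used as the directory name
    FileCacheKey parsed = FileCacheKey::fromKeyString(as_text);
    assert(parsed == key);                                     /// operator== compares the underlying 128-bit value
}
```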
@@ -31,10 +31,9 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration &
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk Cache requires non-empty `path` field (cache base path) in config");

    max_elements = config.getUInt64(config_prefix + ".max_elements", FILECACHE_DEFAULT_MAX_ELEMENTS);

    if (config.has(config_prefix + ".max_file_segment_size"))
        max_file_segment_size = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".max_file_segment_size"));
    else
        max_file_segment_size = FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE;

    cache_on_write_operations = config.getUInt64(config_prefix + ".cache_on_write_operations", false);
    enable_filesystem_query_cache_limit = config.getUInt64(config_prefix + ".enable_filesystem_query_cache_limit", false);

@@ -44,10 +43,12 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration &

    if (config.has(config_prefix + ".bypass_cache_threashold"))
        bypass_cache_threashold = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".bypass_cache_threashold"));
    else
        bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD;

    boundary_alignment = config.getUInt64(config_prefix + ".boundary_alignment", DBMS_DEFAULT_BUFFER_SIZE);
    if (config.has(config_prefix + ".boundary_alignment"))
        boundary_alignment = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".boundary_alignment"));

    if (config.has(config_prefix + ".background_download_threads"))
        background_download_threads = config.getUInt(config_prefix + ".background_download_threads");

    delayed_cleanup_interval_ms = config.getUInt64(config_prefix + ".delayed_cleanup_interval_ms", FILECACHE_DELAYED_CLEANUP_INTERVAL_MS);
}
@@ -26,7 +26,8 @@ struct FileCacheSettings
    size_t bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD;
    size_t delayed_cleanup_interval_ms = FILECACHE_DELAYED_CLEANUP_INTERVAL_MS;

    size_t boundary_alignment = DBMS_DEFAULT_BUFFER_SIZE;
    size_t boundary_alignment = FILECACHE_DEFAULT_FILE_SEGMENT_ALIGNMENT;
    size_t background_download_threads = FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_THREADS;

    void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix);
};
@@ -4,7 +4,9 @@
namespace DB
{

static constexpr int FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 8 * 1024 * 1024;
static constexpr int FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 32 * 1024 * 1024; /// 32Mi
static constexpr int FILECACHE_DEFAULT_FILE_SEGMENT_ALIGNMENT = 4 * 1024 * 1024; /// 4Mi
static constexpr int FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_THREADS = 2;
static constexpr int FILECACHE_DEFAULT_MAX_ELEMENTS = 10000000;
static constexpr int FILECACHE_DEFAULT_HITS_THRESHOLD = 0;
static constexpr size_t FILECACHE_BYPASS_THRESHOLD = 256 * 1024 * 1024;
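The new constants set the default file segment size to 32 MiB and introduce a 4 MiB boundary alignment. The usual way such an alignment is applied is to round the requested byte range outwards to alignment boundaries before splitting it into file segments; the following is a minimal sketch of that arithmetic and an assumption about how the setting is used, not a quote of the implementation:

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>

/// Round [offset, offset + size) outwards to `alignment` boundaries, capped by the file size.
/// Example: offset = 5 MiB, size = 1 MiB, alignment = 4 MiB  ->  [4 MiB, 8 MiB).
std::pair<size_t, size_t> alignRange(size_t offset, size_t size, size_t file_size, size_t alignment)
{
    size_t left = (offset / alignment) * alignment;                                        /// round down
    size_t right = std::min(((offset + size + alignment - 1) / alignment) * alignment,     /// round up
                            file_size);
    return {left, right - left};                                                           /// aligned offset and size
}
```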
@@ -237,8 +237,10 @@ void FileSegment::resetDownloadingStateUnlocked(const FileSegmentGuard::Lock & l
    /// range().size() can equal 0 in case of write-though cache.
    if (!is_unbound && current_downloaded_size != 0 && current_downloaded_size == range().size())
        setDownloadedUnlocked(lock);
    else
    else if (current_downloaded_size)
        setDownloadState(State::PARTIALLY_DOWNLOADED, lock);
    else
        setDownloadState(State::EMPTY, lock);
}

void FileSegment::resetDownloader()

@@ -303,22 +305,13 @@ void FileSegment::resetRemoteFileReader()

FileSegment::RemoteFileReaderPtr FileSegment::extractRemoteFileReader()
{
    auto locked_key = lockKeyMetadata(false);
    if (!locked_key)
    auto lock = lockFileSegment();
    if (remote_file_reader && (download_state == State::DOWNLOADED
        || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION))
    {
        assert(isDetached());
        return std::move(remote_file_reader);
    }

    auto segment_lock = lockFileSegment();

    assert(download_state != State::DETACHED);

    bool is_last_holder = locked_key->isLastOwnerOfFileSegment(offset());
    if (!downloader_id.empty() || !is_last_holder)
        return nullptr;

    return std::move(remote_file_reader);
    return nullptr;
}

void FileSegment::setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_)

@@ -635,16 +628,6 @@ void FileSegment::complete()
        resetDownloaderUnlocked(segment_lock);
    }

    if (is_downloader || is_last_holder)
    {
        if (cache_writer)
        {
            cache_writer->finalize();
            cache_writer.reset();
        }
        remote_file_reader.reset();
    }

    if (segment_kind == FileSegmentKind::Temporary && is_last_holder)
    {
        LOG_TEST(log, "Removing temporary file segment: {}", getInfoForLogUnlocked(segment_lock));

@@ -659,6 +642,7 @@ void FileSegment::complete()
            chassert(current_downloaded_size == range().size());
            chassert(current_downloaded_size == fs::file_size(getPathInLocalCache()));
            chassert(!cache_writer);
            chassert(!remote_file_reader);
            break;
        }
        case State::DOWNLOADING:

@@ -667,7 +651,33 @@ void FileSegment::complete()
            break;
        }
        case State::EMPTY:
        {
            if (is_last_holder)
                locked_key->removeFileSegment(offset(), segment_lock);
            break;
        }
        case State::PARTIALLY_DOWNLOADED:
        {
            chassert(current_downloaded_size > 0);

            if (is_last_holder)
            {
                if (remote_file_reader)
                {
                    LOG_TEST(
                        log, "Submitting file segment for background download "
                        "(having {}/{})", downloaded_size, range().size());

                    locked_key->addToDownloadQueue(offset(), segment_lock); /// Finish download in background.
                }
                else
                {
                    locked_key->shrinkFileSegmentToDownloadedSize(offset(), segment_lock);
                    setDetachedState(segment_lock); /// See comment below.
                }
            }
            break;
        }
        case State::PARTIALLY_DOWNLOADED_NO_CONTINUATION:
        {
            chassert(current_downloaded_size != range().size());

@@ -676,7 +686,6 @@ void FileSegment::complete()
            {
                if (current_downloaded_size == 0)
                {
                    LOG_TEST(log, "Remove file segment {} (nothing downloaded)", range().toString());
                    locked_key->removeFileSegment(offset(), segment_lock);
                }
                else

@@ -698,9 +707,8 @@ void FileSegment::complete()

                    /// We mark current file segment with state DETACHED, even though the data is still in cache
                    /// (but a separate file segment) because is_last_holder is satisfied, so it does not matter.
                    setDetachedState(segment_lock);
                }

                setDetachedState(segment_lock);
            }
            break;
        }

@@ -870,6 +878,8 @@ void FileSegment::setDetachedState(const FileSegmentGuard::Lock & lock)
    key_metadata.reset();
    cache = nullptr;
    queue_iterator = nullptr;
    cache_writer.reset();
    remote_file_reader.reset();
}

void FileSegment::detach(const FileSegmentGuard::Lock & lock, const LockedKey &)
@ -1,12 +1,18 @@
#include <Interpreters/Cache/Metadata.h>
#include <Interpreters/Cache/FileCache.h>
#include <Interpreters/Cache/FileSegment.h>
#include "Common/Exception.h"
#include <Common/logger_useful.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>
#include <filesystem>

namespace fs = std::filesystem;

namespace CurrentMetrics
{
extern const Metric FilesystemCacheDownloadQueueElements;
}

namespace ProfileEvents
{
extern const Event FilesystemCacheLockKeyMicroseconds;
@ -19,6 +25,7 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int BAD_ARGUMENTS;
}

FileSegmentMetadata::FileSegmentMetadata(FileSegmentPtr && file_segment_)
@ -53,11 +60,13 @@ KeyMetadata::KeyMetadata(
const Key & key_,
const std::string & key_path_,
CleanupQueue & cleanup_queue_,
DownloadQueue & download_queue_,
Poco::Logger * log_,
bool created_base_directory_)
: key(key_)
, key_path(key_path_)
, cleanup_queue(cleanup_queue_)
, download_queue(download_queue_)
, created_base_directory(created_base_directory_)
, log(log_)
{
@ -134,6 +143,7 @@ private:
CacheMetadata::CacheMetadata(const std::string & path_)
: path(path_)
, cleanup_queue(std::make_unique<CleanupQueue>())
, download_queue(std::make_unique<DownloadQueue>())
, log(&Poco::Logger::get("CacheMetadata"))
{
}
@ -182,13 +192,15 @@ LockedKeyPtr CacheMetadata::lockKeyMetadata(
if (it == end())
{
if (key_not_found_policy == KeyNotFoundPolicy::THROW)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No such key `{}` in cache", key);
else if (key_not_found_policy == KeyNotFoundPolicy::THROW_LOGICAL)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No such key `{}` in cache", key);
else if (key_not_found_policy == KeyNotFoundPolicy::RETURN_NULL)
return nullptr;

it = emplace(
key, std::make_shared<KeyMetadata>(
key, getPathForKey(key), *cleanup_queue, log, is_initial_load)).first;
key, getPathForKey(key), *cleanup_queue, *download_queue, log, is_initial_load)).first;
}

key_metadata = it->second;
@ -206,6 +218,8 @@ LockedKeyPtr CacheMetadata::lockKeyMetadata(
return locked_metadata;

if (key_not_found_policy == KeyNotFoundPolicy::THROW)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No such key `{}` in cache", key);
else if (key_not_found_policy == KeyNotFoundPolicy::THROW_LOGICAL)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No such key `{}` in cache", key);

if (key_not_found_policy == KeyNotFoundPolicy::RETURN_NULL)
@ -279,6 +293,7 @@ void CacheMetadata::doCleanup()
continue;
}

chassert(it->second->empty());
locked_metadata->markAsRemoved();
erase(it);
LOG_DEBUG(log, "Key {} is removed from metadata", cleanup_key);
@ -319,6 +334,199 @@ void CacheMetadata::doCleanup()
}
}

class DownloadQueue
{
friend struct CacheMetadata;
public:
void add(FileSegmentPtr file_segment)
{
{
std::lock_guard lock(mutex);
queue.emplace(file_segment->key(), file_segment->offset(), file_segment);
}

CurrentMetrics::add(CurrentMetrics::FilesystemCacheDownloadQueueElements);
cv.notify_one();
}

private:
void cancel()
{
{
std::lock_guard lock(mutex);
cancelled = true;
}
cv.notify_all();
}

std::mutex mutex;
std::condition_variable cv;
bool cancelled = false;

struct DownloadInfo
{
CacheMetadata::Key key;
size_t offset;
/// We keep a weak pointer to the file segment
/// instead of just getting it from file_segment_metadata,
/// because the file segment at key:offset could be removed and added back to metadata
/// before we actually started the background download.
std::weak_ptr<FileSegment> file_segment;
};
std::queue<DownloadInfo> queue;
};

void CacheMetadata::downloadThreadFunc()
{
std::optional<Memory<>> memory;
while (true)
{
Key key;
size_t offset;
std::weak_ptr<FileSegment> file_segment_weak;

{
std::unique_lock lock(download_queue->mutex);

if (download_queue->cancelled)
return;

if (download_queue->queue.empty())
{
download_queue->cv.wait(lock);
continue;
}

auto entry = download_queue->queue.front();
key = entry.key;
offset = entry.offset;
file_segment_weak = entry.file_segment;

download_queue->queue.pop();
}

CurrentMetrics::sub(CurrentMetrics::FilesystemCacheDownloadQueueElements);

FileSegmentsHolderPtr holder;
try
{
{
auto locked_key = lockKeyMetadata(key, KeyNotFoundPolicy::RETURN_NULL);
if (!locked_key)
continue;

auto file_segment_metadata = locked_key->tryGetByOffset(offset);
if (!file_segment_metadata || file_segment_metadata->evicting())
continue;

auto file_segment = file_segment_weak.lock();

if (!file_segment
|| file_segment != file_segment_metadata->file_segment
|| file_segment->state() != FileSegment::State::PARTIALLY_DOWNLOADED)
continue;

holder = std::make_unique<FileSegmentsHolder>(FileSegments{file_segment});
}

downloadImpl(holder->front(), memory);
}
catch (...)
{
if (holder)
{
const auto & file_segment = holder->front();
LOG_ERROR(
log, "Error during background download of {}:{} ({}): {}",
file_segment.key(), file_segment.offset(),
file_segment.getInfoForLog(), getCurrentExceptionMessage(true));
}
else
{
tryLogCurrentException(__PRETTY_FUNCTION__);
chassert(false);
}
}
}
}

void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional<Memory<>> & memory)
{
chassert(file_segment.assertCorrectness());

if (file_segment.getOrSetDownloader() != FileSegment::getCallerId())
return;

if (file_segment.getDownloadedSize(false) == file_segment.range().size())
throw Exception(ErrorCodes::LOGICAL_ERROR, "File segment is already fully downloaded");

LOG_TEST(
log, "Downloading {} bytes for file segment {}",
file_segment.range().size() - file_segment.getDownloadedSize(false), file_segment.getInfoForLog());

auto reader = file_segment.getRemoteFileReader();

if (!reader)
{
throw Exception(
ErrorCodes::LOGICAL_ERROR, "No reader. "
"File segment should not have been submitted for background download ({})",
file_segment.getInfoForLog());
}

/// If remote_fs_read_method == 'threadpool',
/// reader itself never owns/allocates the buffer.
if (reader->internalBuffer().empty())
{
if (!memory)
memory.emplace(DBMS_DEFAULT_BUFFER_SIZE);
reader->set(memory->data(), memory->size());
}

size_t offset = file_segment.getCurrentWriteOffset(false);
if (offset != static_cast<size_t>(reader->getPosition()))
reader->seek(offset, SEEK_SET);

while (!reader->eof())
{
auto size = reader->available();

if (!file_segment.reserve(size))
{
LOG_TEST(
log, "Failed to reserve space during background download "
"for {}:{} (downloaded size: {}/{})",
file_segment.key(), file_segment.offset(),
file_segment.getDownloadedSize(false), file_segment.range().size());
return;
}

try
{
file_segment.write(reader->position(), size, offset);
offset += size;
reader->position() += size;
}
catch (ErrnoException & e)
{
int code = e.getErrno();
if (code == /* No space left on device */28 || code == /* Quota exceeded */122)
{
LOG_INFO(log, "Insert into cache is skipped due to insufficient disk space. ({})", e.displayText());
return;
}
throw;
}
}

LOG_TEST(log, "Downloaded file segment: {}", file_segment.getInfoForLog());
}

void CacheMetadata::cancelDownload()
{
download_queue->cancel();
}

LockedKey::LockedKey(std::shared_ptr<KeyMetadata> key_metadata_)
: key_metadata(key_metadata_)
, lock(key_metadata->guard.lock())
@ -355,11 +563,11 @@ bool LockedKey::isLastOwnerOfFileSegment(size_t offset) const
return file_segment_metadata->file_segment.use_count() == 2;
}

void LockedKey::removeAllReleasable()
void LockedKey::removeAll(bool if_releasable)
{
for (auto it = key_metadata->begin(); it != key_metadata->end();)
{
if (!it->second->releasable())
if (if_releasable && !it->second->releasable())
{
++it;
continue;
@ -380,17 +588,32 @@ void LockedKey::removeAllReleasable()
}
}

KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset)
{
auto it = key_metadata->find(offset);
if (it == key_metadata->end())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no offset {}", offset);

auto file_segment = it->second->file_segment;
return removeFileSegmentImpl(it, file_segment->lock());
}

KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock)
{
auto it = key_metadata->find(offset);
if (it == key_metadata->end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset);

return removeFileSegmentImpl(it, segment_lock);
}

KeyMetadata::iterator LockedKey::removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock & segment_lock)
{
auto file_segment = it->second->file_segment;

LOG_DEBUG(
key_metadata->log, "Remove from cache. Key: {}, offset: {}, size: {}",
getKey(), offset, file_segment->reserved_size);
getKey(), file_segment->offset(), file_segment->reserved_size);

chassert(file_segment->assertCorrectnessUnlocked(segment_lock));

@ -456,6 +679,14 @@ void LockedKey::shrinkFileSegmentToDownloadedSize(
chassert(file_segment->assertCorrectnessUnlocked(segment_lock));
}

void LockedKey::addToDownloadQueue(size_t offset, const FileSegmentGuard::Lock &)
{
auto it = key_metadata->find(offset);
if (it == key_metadata->end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset);
key_metadata->download_queue.add(it->second->file_segment);
}

std::optional<FileSegment::Range> LockedKey::hasIntersectingRange(const FileSegment::Range & range) const
{
if (key_metadata->empty())

@ -8,8 +8,12 @@

namespace DB
{

class CleanupQueue;
using CleanupQueuePtr = std::shared_ptr<CleanupQueue>;
class DownloadQueue;
using DownloadQueuePtr = std::shared_ptr<DownloadQueue>;
using FileSegmentsHolderPtr = std::unique_ptr<FileSegmentsHolder>;


struct FileSegmentMetadata : private boost::noncopyable
@ -44,6 +48,7 @@ struct KeyMetadata : public std::map<size_t, FileSegmentMetadataPtr>,
const Key & key_,
const std::string & key_path_,
CleanupQueue & cleanup_queue_,
DownloadQueue & download_queue_,
Poco::Logger * log_,
bool created_base_directory_ = false);

@ -70,6 +75,7 @@ private:
KeyState key_state = KeyState::ACTIVE;
KeyGuard guard;
CleanupQueue & cleanup_queue;
DownloadQueue & download_queue;
std::atomic<bool> created_base_directory = false;
Poco::Logger * log;
};
@ -81,7 +87,7 @@ struct CacheMetadata : public std::unordered_map<FileCacheKey, KeyMetadataPtr>,
{
public:
using Key = FileCacheKey;
using IterateCacheMetadataFunc = std::function<void(const LockedKey &)>;
using IterateCacheMetadataFunc = std::function<void(LockedKey &)>;

explicit CacheMetadata(const std::string & path_);

@ -100,6 +106,7 @@ public:
enum class KeyNotFoundPolicy
{
THROW,
THROW_LOGICAL,
CREATE_EMPTY,
RETURN_NULL,
};
@ -111,12 +118,19 @@ public:

void doCleanup();

void downloadThreadFunc();

void cancelDownload();

private:
CacheMetadataGuard::Lock lockMetadata() const;
const std::string path; /// Cache base path
mutable CacheMetadataGuard guard;
const CleanupQueuePtr cleanup_queue;
const DownloadQueuePtr download_queue;
Poco::Logger * log;

void downloadImpl(FileSegment & file_segment, std::optional<Memory<>> & memory);
};


@ -156,12 +170,15 @@ struct LockedKey : private boost::noncopyable
std::shared_ptr<const KeyMetadata> getKeyMetadata() const { return key_metadata; }
std::shared_ptr<KeyMetadata> getKeyMetadata() { return key_metadata; }

void removeAllReleasable();
void removeAll(bool if_releasable = true);

KeyMetadata::iterator removeFileSegment(size_t offset, const FileSegmentGuard::Lock &);
KeyMetadata::iterator removeFileSegment(size_t offset);

void shrinkFileSegmentToDownloadedSize(size_t offset, const FileSegmentGuard::Lock &);

void addToDownloadQueue(size_t offset, const FileSegmentGuard::Lock &);

bool isLastOwnerOfFileSegment(size_t offset) const;

std::optional<FileSegment::Range> hasIntersectingRange(const FileSegment::Range & range) const;
@ -173,6 +190,8 @@ struct LockedKey : private boost::noncopyable
std::string toString() const;

private:
KeyMetadata::iterator removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock &);

const std::shared_ptr<KeyMetadata> key_metadata;
KeyGuard::Lock lock; /// `lock` must be destructed before `key_metadata`.
};

@ -40,6 +40,8 @@ NamesAndTypesList FilesystemCacheLogElement::getNamesAndTypes()
|
||||
{"source_file_path", std::make_shared<DataTypeString>()},
|
||||
{"file_segment_range", std::make_shared<DataTypeTuple>(types)},
|
||||
{"total_requested_range", std::make_shared<DataTypeTuple>(types)},
|
||||
{"key", std::make_shared<DataTypeString>()},
|
||||
{"offset", std::make_shared<DataTypeUInt64>()},
|
||||
{"size", std::make_shared<DataTypeUInt64>()},
|
||||
{"read_type", std::make_shared<DataTypeString>()},
|
||||
{"read_from_cache_attempted", std::make_shared<DataTypeUInt8>()},
|
||||
@ -60,6 +62,8 @@ void FilesystemCacheLogElement::appendToBlock(MutableColumns & columns) const
|
||||
columns[i++]->insert(source_file_path);
|
||||
columns[i++]->insert(Tuple{file_segment_range.first, file_segment_range.second});
|
||||
columns[i++]->insert(Tuple{requested_range.first, requested_range.second});
|
||||
columns[i++]->insert(file_segment_key);
|
||||
columns[i++]->insert(file_segment_offset);
|
||||
columns[i++]->insert(file_segment_size);
|
||||
columns[i++]->insert(typeToString(cache_type));
|
||||
columns[i++]->insert(read_from_cache_attempted);
|
||||
|
@ -39,6 +39,8 @@ struct FilesystemCacheLogElement
|
||||
std::pair<size_t, size_t> file_segment_range{};
|
||||
std::pair<size_t, size_t> requested_range{};
|
||||
CacheType cache_type{};
|
||||
std::string file_segment_key;
|
||||
size_t file_segment_offset;
|
||||
size_t file_segment_size;
|
||||
bool read_from_cache_attempted;
|
||||
String read_buffer_id;
|
||||
|
@ -19,11 +19,15 @@ static Block getSampleBlock()
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_size"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_elements"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_file_segment_size"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "boundary_alignment"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt8>>(), "cache_on_write_operations"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt8>>(), "cache_hits_threshold"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "current_size"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "current_elements"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeString>(), "path"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt64>>(), "delayed_cleanup_interval_ms"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt64>>(), "background_download_threads"},
|
||||
ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt64>>(), "enable_bypass_cache_with_threshold"},
|
||||
};
|
||||
return Block(columns);
|
||||
}
|
||||
@ -40,14 +44,19 @@ BlockIO InterpreterDescribeCacheQuery::execute()
|
||||
const auto & settings = cache_data.settings;
|
||||
const auto & cache = cache_data.cache;
|
||||
|
||||
res_columns[0]->insert(settings.max_size);
|
||||
res_columns[1]->insert(settings.max_elements);
|
||||
res_columns[2]->insert(settings.max_file_segment_size);
|
||||
res_columns[3]->insert(settings.cache_on_write_operations);
|
||||
res_columns[4]->insert(settings.cache_hits_threshold);
|
||||
res_columns[5]->insert(cache->getUsedCacheSize());
|
||||
res_columns[6]->insert(cache->getFileSegmentsNum());
|
||||
res_columns[7]->insert(cache->getBasePath());
|
||||
size_t i = 0;
|
||||
res_columns[i++]->insert(settings.max_size);
|
||||
res_columns[i++]->insert(settings.max_elements);
|
||||
res_columns[i++]->insert(settings.max_file_segment_size);
|
||||
res_columns[i++]->insert(settings.boundary_alignment);
|
||||
res_columns[i++]->insert(settings.cache_on_write_operations);
|
||||
res_columns[i++]->insert(settings.cache_hits_threshold);
|
||||
res_columns[i++]->insert(cache->getUsedCacheSize());
|
||||
res_columns[i++]->insert(cache->getFileSegmentsNum());
|
||||
res_columns[i++]->insert(cache->getBasePath());
|
||||
res_columns[i++]->insert(settings.delayed_cleanup_interval_ms);
|
||||
res_columns[i++]->insert(settings.background_download_threads);
|
||||
res_columns[i++]->insert(settings.enable_bypass_cache_with_threashold);
|
||||
|
||||
BlockIO res;
|
||||
size_t num_rows = res_columns[0]->size();
|
||||
|
@ -370,7 +370,18 @@ BlockIO InterpreterSystemQuery::execute()
|
||||
else
|
||||
{
|
||||
auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache;
|
||||
cache->removeAllReleasable();
|
||||
if (query.delete_key.empty())
|
||||
{
|
||||
cache->removeAllReleasable();
|
||||
}
|
||||
else
|
||||
{
|
||||
auto key = FileCacheKey::fromKeyString(query.delete_key);
|
||||
if (query.delete_offset.has_value())
|
||||
cache->removeFileSegment(key, query.delete_offset.value());
|
||||
else
|
||||
cache->removeKey(key);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -544,8 +544,8 @@ TEST_F(FileCacheTest, get)
|
||||
cv.notify_one();
|
||||
|
||||
file_segment2.wait(file_segment2.range().left);
|
||||
ASSERT_TRUE(file_segment2.state() == DB::FileSegment::State::PARTIALLY_DOWNLOADED);
|
||||
ASSERT_TRUE(file_segment2.getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_EQ(file_segment2.state(), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_EQ(file_segment2.getOrSetDownloader(), DB::FileSegment::getCallerId());
|
||||
download(file_segment2);
|
||||
});
|
||||
|
||||
|
@ -210,7 +210,15 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
|
||||
else if (type == Type::DROP_FILESYSTEM_CACHE)
|
||||
{
|
||||
if (!filesystem_cache_name.empty())
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name;
|
||||
if (!delete_key.empty())
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key;
|
||||
if (delete_offset.has_value())
|
||||
settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << delete_offset.value();
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (type == Type::UNFREEZE)
|
||||
{
|
||||
|
@ -107,6 +107,8 @@ public:
|
||||
UInt64 seconds{};
|
||||
|
||||
String filesystem_cache_name;
|
||||
std::string delete_key;
|
||||
std::optional<size_t> delete_offset;
|
||||
|
||||
String backup_name;
|
||||
|
||||
|
@ -405,7 +405,15 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
|
||||
ParserLiteral path_parser;
|
||||
ASTPtr ast;
|
||||
if (path_parser.parse(pos, ast, expected))
|
||||
{
|
||||
res->filesystem_cache_name = ast->as<ASTLiteral>()->value.safeGet<String>();
|
||||
if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected))
|
||||
{
|
||||
res->delete_key = ast->as<ASTIdentifier>()->name();
|
||||
if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected))
|
||||
res->delete_offset = ast->as<ASTLiteral>()->value.safeGet<UInt64>();
|
||||
}
|
||||
}
|
||||
if (!parseQueryWithOnCluster(res, pos, expected))
|
||||
return false;
|
||||
break;
|
||||
|
@ -282,7 +282,7 @@ Chain buildPushingToViewsChain(
|
||||
auto * original_thread = current_thread;
|
||||
SCOPE_EXIT({ current_thread = original_thread; });
|
||||
|
||||
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>();
|
||||
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>(/*check_current_thread_on_destruction=*/ false);
|
||||
/// Copy of a ThreadStatus should be internal.
|
||||
view_thread_status_ptr->setInternalThread();
|
||||
view_thread_status_ptr->attachToGroup(running_group);
|
||||
|
@ -39,9 +39,8 @@ DistributedAsyncInsertHeader DistributedAsyncInsertHeader::read(ReadBufferFromFi
|
||||
if (expected_checksum != calculated_checksum)
|
||||
{
|
||||
throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH,
|
||||
"Checksum of extra info doesn't match: corrupted data. Reference: {}{}. Actual: {}{}.",
|
||||
getHexUIntLowercase(expected_checksum.high64), getHexUIntLowercase(expected_checksum.low64),
|
||||
getHexUIntLowercase(calculated_checksum.high64), getHexUIntLowercase(calculated_checksum.low64));
|
||||
"Checksum of extra info doesn't match: corrupted data. Reference: {}. Actual: {}.",
|
||||
getHexUIntLowercase(expected_checksum), getHexUIntLowercase(calculated_checksum));
|
||||
}
|
||||
|
||||
/// Read the parts of the header.
|
||||
|
@ -80,6 +80,11 @@ DataPartStorageIteratorPtr DataPartStorageOnDiskFull::iterate() const
|
||||
volume->getDisk()->iterateDirectory(fs::path(root_path) / part_dir));
|
||||
}
|
||||
|
||||
Poco::Timestamp DataPartStorageOnDiskFull::getFileLastModified(const String & file_name) const
|
||||
{
|
||||
return volume->getDisk()->getLastModified(fs::path(root_path) / part_dir / file_name);
|
||||
}
|
||||
|
||||
size_t DataPartStorageOnDiskFull::getFileSize(const String & file_name) const
|
||||
{
|
||||
return volume->getDisk()->getFileSize(fs::path(root_path) / part_dir / file_name);
|
||||
|
@ -20,6 +20,7 @@ public:
|
||||
bool isDirectory(const std::string & name) const override;
|
||||
|
||||
DataPartStorageIteratorPtr iterate() const override;
|
||||
Poco::Timestamp getFileLastModified(const String & file_name) const override;
|
||||
size_t getFileSize(const std::string & file_name) const override;
|
||||
UInt32 getRefCount(const std::string & file_name) const override;
|
||||
std::string getRemotePath(const std::string & file_name) const override;
|
||||
|
@ -122,6 +122,7 @@ public:
|
||||
virtual DataPartStorageIteratorPtr iterate() const = 0;
|
||||
|
||||
/// Get metadata for a file inside path dir.
|
||||
virtual Poco::Timestamp getFileLastModified(const std::string & file_name) const = 0;
|
||||
virtual size_t getFileSize(const std::string & file_name) const = 0;
|
||||
virtual UInt32 getRefCount(const std::string & file_name) const = 0;
|
||||
|
||||
|
@ -116,6 +116,8 @@ public:
|
||||
/// Otherwise return information about column size on disk.
|
||||
ColumnSize getColumnSize(const String & column_name) const;
|
||||
|
||||
virtual std::optional<time_t> getColumnModificationTime(const String & column_name) const = 0;
|
||||
|
||||
/// NOTE: Returns zeros if secondary indexes are not found in checksums.
|
||||
/// Otherwise return information about secondary index size on disk.
|
||||
IndexSize getSecondaryIndexSize(const String & secondary_index_name) const;
|
||||
|
@ -4529,9 +4529,8 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(
|
||||
}
|
||||
|
||||
|
||||
void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy)
|
||||
void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy, DataPartsLock &)
|
||||
{
|
||||
auto lock = lockParts();
|
||||
for (auto original_active_part : getDataPartsStateRange(DataPartState::Active)) // NOLINT (copy is intended)
|
||||
{
|
||||
if (part_copy->name == original_active_part->name)
|
||||
@ -4587,6 +4586,12 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const String &
|
||||
return getActiveContainingPart(part_info);
|
||||
}
|
||||
|
||||
MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const String & part_name, DataPartsLock & lock) const
|
||||
{
|
||||
auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version);
|
||||
return getActiveContainingPart(part_info, DataPartState::Active, lock);
|
||||
}
|
||||
|
||||
MergeTreeData::DataPartsVector MergeTreeData::getVisibleDataPartsVectorInPartition(ContextPtr local_context, const String & partition_id) const
|
||||
{
|
||||
return getVisibleDataPartsVectorInPartition(local_context->getCurrentTransaction().get(), partition_id);
|
||||
|
@ -504,12 +504,13 @@ public:
|
||||
|
||||
/// Returns a part in Active state with the given name or a part containing it. If there is no such part, returns nullptr.
|
||||
DataPartPtr getActiveContainingPart(const String & part_name) const;
|
||||
DataPartPtr getActiveContainingPart(const String & part_name, DataPartsLock & lock) const;
|
||||
DataPartPtr getActiveContainingPart(const MergeTreePartInfo & part_info) const;
|
||||
DataPartPtr getActiveContainingPart(const MergeTreePartInfo & part_info, DataPartState state, DataPartsLock & lock) const;
|
||||
|
||||
/// Swap part with it's identical copy (possible with another path on another disk).
|
||||
/// If original part is not active or doesn't exist exception will be thrown.
|
||||
void swapActivePart(MergeTreeData::DataPartPtr part_copy);
|
||||
void swapActivePart(MergeTreeData::DataPartPtr part_copy, DataPartsLock &);
|
||||
|
||||
/// Returns all parts in specified partition
|
||||
DataPartsVector getVisibleDataPartsVectorInPartition(MergeTreeTransaction * txn, const String & partition_id, DataPartsLock * acquired_lock = nullptr) const;
|
||||
|
@ -307,19 +307,7 @@ static void updateHash(SipHash & hash, const std::string & data)
|
||||
/// Hash is the same as MinimalisticDataPartChecksums::hash_of_all_files
|
||||
String MergeTreeDataPartChecksums::getTotalChecksumHex() const
|
||||
{
|
||||
SipHash hash_of_all_files;
|
||||
|
||||
for (const auto & [name, checksum] : files)
|
||||
{
|
||||
updateHash(hash_of_all_files, name);
|
||||
hash_of_all_files.update(checksum.file_hash);
|
||||
}
|
||||
|
||||
UInt64 lo;
|
||||
UInt64 hi;
|
||||
hash_of_all_files.get128(lo, hi);
|
||||
|
||||
return getHexUIntUppercase(hi) + getHexUIntUppercase(lo);
|
||||
return getHexUIntUppercase(getTotalChecksumUInt128());
|
||||
}
|
||||
|
||||
MergeTreeDataPartChecksums::Checksum::uint128 MergeTreeDataPartChecksums::getTotalChecksumUInt128() const
|
||||
|
@ -144,6 +144,11 @@ bool MergeTreeDataPartCompact::hasColumnFiles(const NameAndTypePair & column) co
|
||||
return (bin_checksum != checksums.files.end() && mrk_checksum != checksums.files.end());
|
||||
}
|
||||
|
||||
std::optional<time_t> MergeTreeDataPartCompact::getColumnModificationTime(const String & /* column_name */) const
|
||||
{
|
||||
return getDataPartStorage().getFileLastModified(DATA_FILE_NAME_WITH_EXTENSION).epochTime();
|
||||
}
|
||||
|
||||
void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) const
|
||||
{
|
||||
checkConsistencyBase();
|
||||
|
@ -55,6 +55,8 @@ public:
|
||||
|
||||
bool hasColumnFiles(const NameAndTypePair & column) const override;
|
||||
|
||||
std::optional<time_t> getColumnModificationTime(const String & column_name) const override;
|
||||
|
||||
String getFileNameForColumn(const NameAndTypePair & /* column */) const override { return DATA_FILE_NAME; }
|
||||
|
||||
~MergeTreeDataPartCompact() override;
|
||||
|
@ -43,6 +43,7 @@ public:
|
||||
String getFileNameForColumn(const NameAndTypePair & /* column */) const override { return ""; }
|
||||
void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) override;
|
||||
DataPartStoragePtr makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const override;
|
||||
std::optional<time_t> getColumnModificationTime(const String & /* column_name */) const override { return {}; }
|
||||
|
||||
MutableDataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const;
|
||||
|
||||
|
@ -260,6 +260,18 @@ bool MergeTreeDataPartWide::hasColumnFiles(const NameAndTypePair & column) const
|
||||
return res;
|
||||
}
|
||||
|
||||
std::optional<time_t> MergeTreeDataPartWide::getColumnModificationTime(const String & column_name) const
|
||||
{
|
||||
try
|
||||
{
|
||||
return getDataPartStorage().getFileLastModified(column_name + DATA_FILE_EXTENSION).epochTime();
|
||||
}
|
||||
catch (const fs::filesystem_error &)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
String MergeTreeDataPartWide::getFileNameForColumn(const NameAndTypePair & column) const
|
||||
{
|
||||
String filename;
|
||||
|
@ -54,6 +54,8 @@ public:
|
||||
|
||||
bool hasColumnFiles(const NameAndTypePair & column) const override;
|
||||
|
||||
std::optional<time_t> getColumnModificationTime(const String & column_name) const override;
|
||||
|
||||
protected:
|
||||
static void loadIndexGranularityImpl(
|
||||
MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_,
|
||||
|
@ -263,7 +263,10 @@ void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) cons
|
||||
if (moves_blocker.isCancelled())
|
||||
throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts.");
|
||||
|
||||
auto active_part = data->getActiveContainingPart(cloned_part.part->name);
|
||||
/// `getActiveContainingPart` and `swapActivePart` are called under the same lock
|
||||
/// to prevent part becoming inactive between calls
|
||||
auto part_lock = data->lockParts();
|
||||
auto active_part = data->getActiveContainingPart(cloned_part.part->name, part_lock);
|
||||
|
||||
/// It's ok, because we don't block moving parts for merges or mutations
|
||||
if (!active_part || active_part->name != cloned_part.part->name)
|
||||
@ -284,7 +287,7 @@ void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) cons
|
||||
cloned_part.part->renameTo(active_part->name, false);
|
||||
|
||||
/// TODO what happen if server goes down here?
|
||||
data->swapActivePart(cloned_part.part);
|
||||
data->swapActivePart(cloned_part.part, part_lock);
|
||||
|
||||
LOG_TRACE(log, "Part {} was moved to {}", cloned_part.part->name, cloned_part.part->getDataPartStorage().getFullPath());
|
||||
|
||||
|
@ -250,8 +250,8 @@ std::unordered_map<String, IPartMetadataManager::uint128> PartMetadataManagerWit
|
||||
ErrorCodes::CORRUPTED_DATA,
|
||||
"Checksums doesn't match in part {} for {}. Expected: {}. Found {}.",
|
||||
part->name, file_path,
|
||||
getHexUIntUppercase(disk_checksum.high64) + getHexUIntUppercase(disk_checksum.low64),
|
||||
getHexUIntUppercase(cache_checksums[i].high64) + getHexUIntUppercase(cache_checksums[i].low64));
|
||||
getHexUIntUppercase(disk_checksum),
|
||||
getHexUIntUppercase(cache_checksums[i]));
|
||||
|
||||
disk_checksums.push_back(disk_checksum);
|
||||
continue;
|
||||
@ -287,8 +287,8 @@ std::unordered_map<String, IPartMetadataManager::uint128> PartMetadataManagerWit
|
||||
ErrorCodes::CORRUPTED_DATA,
|
||||
"Checksums doesn't match in projection part {} {}. Expected: {}. Found {}.",
|
||||
part->name, proj_name,
|
||||
getHexUIntUppercase(disk_checksum.high64) + getHexUIntUppercase(disk_checksum.low64),
|
||||
getHexUIntUppercase(cache_checksums[i].high64) + getHexUIntUppercase(cache_checksums[i].low64));
|
||||
getHexUIntUppercase(disk_checksum),
|
||||
getHexUIntUppercase(cache_checksums[i]));
|
||||
disk_checksums.push_back(disk_checksum);
|
||||
}
|
||||
return results;
|
||||
|
@ -2900,8 +2900,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo
|
||||
desired_checksums = MinimalisticDataPartChecksums::deserializeFrom(desired_checksums_str);
|
||||
}
|
||||
|
||||
const auto [lo, hi] = desired_checksums.hash_of_all_files;
|
||||
log_entry.part_checksum = getHexUIntUppercase(hi) + getHexUIntUppercase(lo);
|
||||
log_entry.part_checksum = getHexUIntUppercase(desired_checksums.hash_of_all_files);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -252,17 +252,17 @@ void StorageSystemParts::processNextStorage(
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.hash_of_all_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.hash_of_uncompressed_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.uncompressed_hash_of_compressed_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeNested.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <DataTypes/NestedUtils.h>
|
||||
#include <DataTypes/DataTypeUUID.h>
|
||||
#include <Storages/VirtualColumnUtils.h>
|
||||
@ -62,6 +63,8 @@ StorageSystemPartsColumns::StorageSystemPartsColumns(const StorageID & table_id_
|
||||
{"column_data_compressed_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_data_uncompressed_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_marks_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_modification_time", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())},
|
||||
|
||||
{"serialization_kind", std::make_shared<DataTypeString>()},
|
||||
{"subcolumns.names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
|
||||
{"subcolumns.types", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
|
||||
@ -235,6 +238,13 @@ void StorageSystemPartsColumns::processNextStorage(
|
||||
columns[res_index++]->insert(column_size.data_uncompressed);
|
||||
if (columns_mask[src_index++])
|
||||
columns[res_index++]->insert(column_size.marks);
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
if (auto column_modification_time = part->getColumnModificationTime(column.name))
|
||||
columns[res_index++]->insert(UInt64(column_modification_time.value()));
|
||||
else
|
||||
columns[res_index++]->insertDefault();
|
||||
}
|
||||
|
||||
auto serialization = part->getSerialization(column.name);
|
||||
if (columns_mask[src_index++])
|
||||
|
@ -221,17 +221,17 @@ void StorageSystemProjectionParts::processNextStorage(
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.hash_of_all_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.hash_of_uncompressed_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
auto checksum = helper.uncompressed_hash_of_compressed_files;
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum.high64) + getHexUIntLowercase(checksum.low64));
|
||||
columns[res_index++]->insert(getHexUIntLowercase(checksum));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeUUID.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <Storages/VirtualColumnUtils.h>
|
||||
#include <Databases/IDatabase.h>
|
||||
#include <Parsers/queryToString.h>
|
||||
@ -66,7 +67,8 @@ StorageSystemProjectionPartsColumns::StorageSystemProjectionPartsColumns(const S
|
||||
{"column_bytes_on_disk", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_data_compressed_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_data_uncompressed_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_marks_bytes", std::make_shared<DataTypeUInt64>()}
|
||||
{"column_marks_bytes", std::make_shared<DataTypeUInt64>()},
|
||||
{"column_modification_time", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())},
|
||||
}
|
||||
)
|
||||
{
|
||||
@ -247,6 +249,13 @@ void StorageSystemProjectionPartsColumns::processNextStorage(
|
||||
columns[res_index++]->insert(column_size.data_uncompressed);
|
||||
if (columns_mask[src_index++])
|
||||
columns[res_index++]->insert(column_size.marks);
|
||||
if (columns_mask[src_index++])
|
||||
{
|
||||
if (auto column_modification_time = part->getColumnModificationTime(column.name))
|
||||
columns[res_index++]->insert(UInt64(column_modification_time.value()));
|
||||
else
|
||||
columns[res_index++]->insertDefault();
|
||||
}
|
||||
|
||||
if (has_state_column)
|
||||
columns[res_index++]->insert(part->stateString());
|
||||
|
@ -7,7 +7,7 @@ from typing import Dict, List, Literal, Optional, Union
|
||||
import logging
|
||||
|
||||
from github import Github
|
||||
from github.GithubObject import _NotSetType, NotSet as NotSet # type: ignore
|
||||
from github.GithubObject import _NotSetType, NotSet as NotSet
|
||||
from github.Commit import Commit
|
||||
from github.CommitStatus import CommitStatus
|
||||
from github.IssueComment import IssueComment
|
||||
|
@ -111,7 +111,7 @@ class GitHub(github.Github):
|
||||
# See https://github.com/PyGithub/PyGithub/issues/2202,
|
||||
# obj._rawData doesn't spend additional API requests
|
||||
# pylint: disable=protected-access
|
||||
repo_url = issue._rawData["repository_url"] # type: ignore
|
||||
repo_url = issue._rawData["repository_url"]
|
||||
if repo_url not in repos:
|
||||
repos[repo_url] = issue.repository
|
||||
prs.append(
|
||||
|
@ -154,7 +154,7 @@ def get_workflows_for_head(repo: Repository, head_sha: str) -> List[WorkflowRun]
|
||||
return list(
|
||||
PaginatedList(
|
||||
WorkflowRun,
|
||||
repo._requester, # type:ignore # pylint:disable=protected-access
|
||||
repo._requester, # pylint:disable=protected-access
|
||||
f"{repo.url}/actions/runs",
|
||||
{"head_sha": head_sha},
|
||||
list_item="workflow_runs",
|
||||
|
@ -5,4 +5,5 @@
|
||||
<!-- Default is 60 seconds, but let's make tests more aggressive -->
|
||||
<merge_tree_clear_old_temporary_directories_interval_seconds>5</merge_tree_clear_old_temporary_directories_interval_seconds>
|
||||
</merge_tree>
|
||||
<allow_remove_stale_moving_parts>true</allow_remove_stale_moving_parts>
|
||||
</clickhouse>
|
||||
|
@ -203,6 +203,9 @@ def update_configs(
|
||||
|
||||
|
||||
def test_stuck_replica(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs()
|
||||
|
||||
cluster.pause_container("node_1")
|
||||
@ -233,6 +236,9 @@ def test_stuck_replica(started_cluster):
|
||||
|
||||
|
||||
def test_long_query(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs()
|
||||
|
||||
# Restart to reset pool states.
|
||||
@ -249,12 +255,18 @@ def test_long_query(started_cluster):
|
||||
|
||||
|
||||
def test_send_table_status_sleep(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(node_1_sleep_in_send_tables_status=sleep_time)
|
||||
check_query(expected_replica="node_2")
|
||||
check_changing_replica_events(1)
|
||||
|
||||
|
||||
def test_send_table_status_sleep2(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_tables_status=sleep_time,
|
||||
node_2_sleep_in_send_tables_status=sleep_time,
|
||||
@ -264,12 +276,18 @@ def test_send_table_status_sleep2(started_cluster):
|
||||
|
||||
|
||||
def test_send_data(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(node_1_sleep_in_send_data=sleep_time)
|
||||
check_query(expected_replica="node_2")
|
||||
check_changing_replica_events(1)
|
||||
|
||||
|
||||
def test_send_data2(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time
|
||||
)
|
||||
@ -278,6 +296,9 @@ def test_send_data2(started_cluster):
|
||||
|
||||
|
||||
def test_combination1(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_tables_status=sleep_time,
|
||||
node_2_sleep_in_send_data=sleep_time,
|
||||
@ -287,6 +308,9 @@ def test_combination1(started_cluster):
|
||||
|
||||
|
||||
def test_combination2(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time,
|
||||
node_2_sleep_in_send_tables_status=sleep_time,
|
||||
@ -296,6 +320,9 @@ def test_combination2(started_cluster):
|
||||
|
||||
|
||||
def test_combination3(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time,
|
||||
node_2_sleep_in_send_tables_status=1000,
|
||||
@ -306,6 +333,9 @@ def test_combination3(started_cluster):
|
||||
|
||||
|
||||
def test_combination4(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_tables_status=1000,
|
||||
node_1_sleep_in_send_data=sleep_time,
|
||||
@ -317,6 +347,9 @@ def test_combination4(started_cluster):
|
||||
|
||||
|
||||
def test_receive_timeout1(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
# Check the situation when first two replicas get receive timeout
|
||||
# in establishing connection, but the third replica is ok.
|
||||
update_configs(
|
||||
@ -329,6 +362,9 @@ def test_receive_timeout1(started_cluster):
|
||||
|
||||
|
||||
def test_receive_timeout2(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
# Check the situation when first replica get receive timeout
|
||||
# in packet receiving but there are replicas in process of
|
||||
# connection establishing.
|
||||
@ -342,6 +378,9 @@ def test_receive_timeout2(started_cluster):
|
||||
|
||||
|
||||
def test_initial_receive_timeout(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
# Check the situation when replicas don't respond after
|
||||
# receiving query (so, no packets were send to initiator)
|
||||
update_configs(
|
||||
@ -360,6 +399,9 @@ def test_initial_receive_timeout(started_cluster):
|
||||
|
||||
|
||||
def test_async_connect(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs()
|
||||
|
||||
NODES["node"].restart_clickhouse()
|
||||
@ -390,6 +432,9 @@ def test_async_connect(started_cluster):
|
||||
|
||||
|
||||
def test_async_query_sending(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_after_receiving_query=5000,
|
||||
node_2_sleep_after_receiving_query=5000,
|
||||
|
@ -172,6 +172,9 @@ def update_configs(
|
||||
|
||||
|
||||
def test_send_table_status_sleep(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_tables_status=sleep_time,
|
||||
node_2_sleep_in_send_tables_status=sleep_time,
|
||||
@ -181,6 +184,9 @@ def test_send_table_status_sleep(started_cluster):
|
||||
|
||||
|
||||
def test_send_data(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time
|
||||
)
|
||||
@ -189,6 +195,9 @@ def test_send_data(started_cluster):
|
||||
|
||||
|
||||
def test_combination1(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_tables_status=1000,
|
||||
node_2_sleep_in_send_tables_status=1000,
|
||||
@ -199,6 +208,9 @@ def test_combination1(started_cluster):
|
||||
|
||||
|
||||
def test_combination2(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time,
|
||||
node_2_sleep_in_send_tables_status=1000,
|
||||
@ -210,6 +222,9 @@ def test_combination2(started_cluster):
|
||||
|
||||
|
||||
def test_query_with_no_data_to_sample(started_cluster):
|
||||
if NODES["node"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
update_configs(
|
||||
node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time
|
||||
)
|
||||
|
@ -58,6 +58,9 @@ def test(started_cluster):
|
||||
config.format(sleep_in_send_data_ms=1000000),
|
||||
)
|
||||
|
||||
if NODES["node1"].is_built_with_thread_sanitizer():
|
||||
pytest.skip("Hedged requests don't work under Thread Sanitizer")
|
||||
|
||||
attempts = 0
|
||||
while attempts < 1000:
|
||||
setting = NODES["node2"].http_query(
|
||||
|
@ -1,45 +0,0 @@
|
||||
<test>
|
||||
<substitutions>
|
||||
<substitution>
|
||||
<name>table_size</name>
|
||||
<values>
|
||||
<value>100000000</value>
|
||||
</values>
|
||||
</substitution>
|
||||
</substitutions>
|
||||
|
||||
<settings>
|
||||
<join_algorithm>full_sorting_merge</join_algorithm>
|
||||
</settings>
|
||||
|
||||
<create_query>
|
||||
CREATE TABLE t1 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY y
|
||||
AS SELECT
|
||||
sipHash64(number, 't1_x') % {table_size} AS x,
|
||||
sipHash64(number, 't1_y') % {table_size} AS y
|
||||
FROM numbers({table_size})
|
||||
</create_query>
|
||||
|
||||
<create_query>
|
||||
CREATE TABLE t2 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY y
|
||||
AS SELECT
|
||||
sipHash64(number, 't2_x') % {table_size} AS x,
|
||||
sipHash64(number, 't2_y') % {table_size} AS y
|
||||
FROM numbers({table_size})
|
||||
</create_query>
|
||||
|
||||
<query>SELECT * FROM t1 JOIN t2 ON t1.x = t2.x WHERE less(t1.y, 10000)</query>
|
||||
<query>SELECT * FROM t2 JOIN t1 ON t1.x = t2.x WHERE less(t1.y, 10000)</query>
|
||||
|
||||
<query>SELECT * FROM t1 JOIN t2 ON t1.x = t2.x WHERE greater(t1.y, {table_size} - 10000)</query>
|
||||
<query>SELECT * FROM t2 JOIN t1 ON t1.x = t2.x WHERE greater(t1.y, {table_size} - 10000)</query>
|
||||
|
||||
<query>SELECT * FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.y % 100 = 0</query>
|
||||
<query>SELECT * FROM t2 JOIN t1 ON t1.x = t2.x WHERE t1.y % 100 = 0</query>
|
||||
|
||||
<query>SELECT * FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.y % 1000 = 0</query>
|
||||
<query>SELECT * FROM t2 JOIN t1 ON t1.x = t2.x WHERE t1.y % 1000 = 0</query>
|
||||
|
||||
<drop_query>DROP TABLE IF EXISTS t1</drop_query>
|
||||
<drop_query>DROP TABLE IF EXISTS t2</drop_query>
|
||||
</test>
|
@ -12,6 +12,6 @@ ${CLICKHOUSE_CLIENT} --query="select * from orc_load FORMAT ORC" > "${CLICKHOUSE
|
||||
${CLICKHOUSE_CLIENT} --query="truncate table orc_load"
|
||||
|
||||
cat "${CLICKHOUSE_TMP}"/test.orc | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC"
|
||||
timeout 3 ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < "${CLICKHOUSE_TMP}"/test.orc
|
||||
${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < "${CLICKHOUSE_TMP}"/test.orc
|
||||
${CLICKHOUSE_CLIENT} --query="select * from orc_load"
|
||||
${CLICKHOUSE_CLIENT} --query="drop table orc_load"
|
||||
|
@ -12,7 +12,10 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query "CREATE DATABA
|
||||
|
||||
function thread1()
|
||||
{
|
||||
while true; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"; done
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x;
|
||||
DROP TABLE test_01320.r;" 2>&1 | grep -F "Code:" | grep -v "UNKNOWN_DATABASE"
|
||||
done
|
||||
}
|
||||
|
||||
function thread2()
|
||||
|
@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
$CLICKHOUSE_CLIENT --connections_with_failover_max_tries 10 --query "SELECT hostName() FROM remote('128.1.2.3', default.tmp)" 2>&1 | grep -o -P 'Timeout exceeded while connecting to socket|Network is unreachable' | wc -l
|
||||
$CLICKHOUSE_CLIENT --connections_with_failover_max_tries 10 --query "SELECT hostName() FROM remote('128.1.2.3', default.tmp)" 2>&1 | grep -o -P 'Timeout exceeded while connecting to socket|Network is unreachable|Timeout: connect timed out' | wc -l
|
||||
|
@ -1 +1,2 @@
|
||||
-- Tags: no-tsan
|
||||
select number from remote('127.0.0.{3|2}', numbers(2)) where number global in (select number from numbers(1)) settings async_socket_for_remote=1, use_hedged_requests = 1, sleep_in_send_data_ms=10, receive_data_timeout_ms=1;
|
||||
|
@ -1,10 +1,10 @@
|
||||
255.255.255.255
|
||||
HedgedConnectionsFactory: Connection failed at try №1
|
||||
ConnectionPoolWithFailover: Connection failed at try №1
|
||||
executeQuery: Code: 519.: All attempts to get table structure failed.
|
||||
127.2,255.255.255.255
|
||||
0
|
||||
HedgedConnectionsFactory: Connection failed at try №1
|
||||
ConnectionPoolWithFailover: Connection failed at try №1
|
||||
255.255.255.255,127.2
|
||||
0
|
||||
HedgedConnectionsFactory: Connection failed at try №1
|
||||
HedgedConnectionsFactory: Connection failed at try №1
|
||||
ConnectionPoolWithFailover: Connection failed at try №1
|
||||
ConnectionPoolWithFailover: Connection failed at try №1
|
||||
|
@ -25,7 +25,7 @@ function execute_query()
|
||||
# clickhouse-client 2> >(wc -l)
|
||||
#
|
||||
# May dump output of "wc -l" after some other programs.
|
||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query "select * from remote('$hosts', system.one)" 2>"$stderr"
|
||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query "select * from remote('$hosts', system.one) settings use_hedged_requests=0" 2>"$stderr"
|
||||
process_log_safe "$stderr"
|
||||
}
|
||||
execute_query 255.255.255.255
|
||||
|
@ -565,6 +565,7 @@ CREATE TABLE system.parts_columns
|
||||
`column_data_compressed_bytes` UInt64,
|
||||
`column_data_uncompressed_bytes` UInt64,
|
||||
`column_marks_bytes` UInt64,
|
||||
`column_modification_time` Nullable(DateTime),
|
||||
`serialization_kind` String,
|
||||
`subcolumns.names` Array(String),
|
||||
`subcolumns.types` Array(String),
|
||||
@ -750,6 +751,7 @@ CREATE TABLE system.projection_parts_columns
|
||||
`column_data_compressed_bytes` UInt64,
|
||||
`column_data_uncompressed_bytes` UInt64,
|
||||
`column_marks_bytes` UInt64,
|
||||
`column_modification_time` Nullable(DateTime),
|
||||
`bytes` UInt64,
|
||||
`marks_size` UInt64,
|
||||
`part_name` String
|
||||
|
@ -17,6 +17,8 @@ opts=(
|
||||
--allow_experimental_parallel_reading_from_replicas 1
|
||||
--parallel_replicas_for_non_replicated_merge_tree 1
|
||||
--max_parallel_replicas 3
|
||||
--use_hedged_requests 0
|
||||
--cluster_for_parallel_replicas parallel_replicas
|
||||
|
||||
--iterations 1
|
||||
)
|
||||
|
@ -0,0 +1,2 @@
0
1

@ -0,0 +1,11 @@
DROP TABLE IF EXISTS lc_nullable_string;

CREATE TABLE lc_nullable_string(`c1` LowCardinality(Nullable(String)) DEFAULT CAST(NULL, 'LowCardinality(Nullable(String))'))
ENGINE = Memory;

INSERT INTO lc_nullable_string (c1) FORMAT Values (0);
INSERT INTO lc_nullable_string (c1) Values (1);

SELECT * FROM lc_nullable_string ORDER BY c1;

DROP TABLE lc_nullable_string;

@ -1,2 +1,2 @@
134217728 10000000 8388608 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/
134217728 10000000 104857600 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/
134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/ 100 2 0
134217728 10000000 104857600 4194304 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/ 100 2 0