Mirror of https://github.com/ClickHouse/ClickHouse.git

Fixed stack overflow on Field destruction

commit af603c2cc6
parent c7ae150ffb

@@ -8,6 +8,7 @@
 
 #include <Common/MemoryTracker.h>
 #include <Common/CurrentThread.h>
+#include <Common/Arena.h>
 
 #include <Interpreters/Context.h>
 

@@ -34,7 +34,7 @@ try
     DB::Memory<> memory;
     memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer());
 
-    codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
+    codec->doDecompressData(reinterpret_cast<const char *>(data), static_cast<UInt32>(size), memory.data(), static_cast<UInt32>(output_buffer_size));
 
     return 0;
 }

@@ -34,7 +34,7 @@ try
     DB::Memory<> memory;
     memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer());
 
-    codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
+    codec->doDecompressData(reinterpret_cast<const char *>(data), static_cast<UInt32>(size), memory.data(), static_cast<UInt32>(output_buffer_size));
 
     return 0;
 }

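The two hunks above make an implicit size_t -> UInt32 narrowing explicit: doDecompressData takes UInt32 sizes, while the fuzzer harness works with size_t values, so the casts spell out the truncation instead of leaving it to an implicit conversion. Below is a minimal, self-contained sketch of the same pattern, assuming nothing about ClickHouse beyond the UInt32 parameter type; narrow_to_u32 and consume are illustrative names only.

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

using UInt32 = std::uint32_t;

/// Convert a size_t to UInt32, refusing values that would silently truncate.
static UInt32 narrow_to_u32(std::size_t n)
{
    if (n > std::numeric_limits<UInt32>::max())
        throw std::length_error("size does not fit into UInt32");
    return static_cast<UInt32>(n);
}

/// Stand-in for an API that, like doDecompressData, takes UInt32 sizes.
static void consume(const char * /*data*/, UInt32 /*size*/) {}

int main()
{
    const char buf[16] = {};
    std::size_t size = sizeof(buf);       // size_t is typically 64-bit
    consume(buf, narrow_to_u32(size));    // narrowing is explicit and checked
    return 0;
}

Whether to range-check before truncating (as narrow_to_u32 does) or to cast unconditionally (as the fuzzers do, where the inputs are already bounded) is a judgment call for the caller.
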
@@ -292,10 +292,10 @@ try
 
     DB::Memory<> memory;
     memory.resize(input.size() + codec_128->getAdditionalSizeAtTheEndOfBuffer());
-    codec_128->doDecompressData(input.data(), input.size(), memory.data(), input.size() - 31);
+    codec_128->doDecompressData(input.data(), static_cast<UInt32>(input.size()), memory.data(), static_cast<UInt32>(input.size() - 31));
 
     memory.resize(input.size() + codec_128->getAdditionalSizeAtTheEndOfBuffer());
-    codec_256->doDecompressData(input.data(), input.size(), memory.data(), input.size() - 31);
+    codec_256->doDecompressData(input.data(), static_cast<UInt32>(input.size()), memory.data(), static_cast<UInt32>(input.size() - 31));
     return 0;
 }
 catch (...)

@@ -24,7 +24,7 @@ try
         return 0;
 
     const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);
-    auto codec = DB::getCompressionCodecLZ4(p->level);
+    auto codec = DB::getCompressionCodecLZ4(static_cast<int>(p->level));
 
     size_t output_buffer_size = p->decompressed_size % 65536;
     size -= sizeof(AuxiliaryRandomData);

@@ -37,7 +37,7 @@ try
     DB::Memory<> memory;
     memory.resize(output_buffer_size + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER);
 
-    codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
+    codec->doDecompressData(reinterpret_cast<const char *>(data), static_cast<UInt32>(size), memory.data(), static_cast<UInt32>(output_buffer_size));
 
     return 0;
 }

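The two hunks above come from a libFuzzer-style harness: an LLVMFuzzerTestOneInput entry point with a function-level try, reading an AuxiliaryRandomData header from the start of the input. The following is a hedged sketch of how such a harness is typically shaped; the struct layout (the level field in particular) and the catch behaviour are assumptions, and only the lines that appear in the diff are taken from the source.

#include <cstddef>
#include <cstdint>

/// Assumed layout: only decompressed_size and level are visible in the diff.
struct AuxiliaryRandomData
{
    uint8_t level;
    size_t decompressed_size;
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
try
{
    if (size < sizeof(AuxiliaryRandomData))
        return 0;

    const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);

    size_t output_buffer_size = p->decompressed_size % 65536;
    size -= sizeof(AuxiliaryRandomData);
    data += sizeof(AuxiliaryRandomData);

    /// The real harness resizes a DB::Memory<> buffer here and calls
    /// codec->doDecompressData(..., static_cast<UInt32>(size), ...,
    /// static_cast<UInt32>(output_buffer_size)).
    (void)data;
    (void)output_buffer_size;
    return 0;
}
catch (...)
{
    return 1;
}
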
@@ -28,6 +28,7 @@ namespace ErrorCodes
     extern const int NOT_IMPLEMENTED;
     extern const int LOGICAL_ERROR;
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int TOO_DEEP_RECURSION;
 }
 
 constexpr Null NEGATIVE_INFINITY{Null::Value::NegativeInfinity};

@@ -291,6 +292,11 @@ decltype(auto) castToNearestFieldType(T && x)
   */
 #define DBMS_MIN_FIELD_SIZE 32
 
+#if defined(SANITIZER) || !defined(NDEBUG)
+#define DBMS_MAX_NESTED_FIELD_DEPTH 64
+#else
+#define DBMS_MAX_NESTED_FIELD_DEPTH 256
+#endif
 
 /** Discriminated union of several types.
   * Made for replacement of `boost::variant`

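The new DBMS_MAX_NESTED_FIELD_DEPTH limit caps how deeply a Field may nest, with a lower cap when sanitizers or debug assertions are enabled, presumably because instrumented builds use noticeably more stack per frame. The problem it guards against is easy to reproduce with any recursively owning type: destruction recurses once per nesting level, so unbounded nesting eventually overflows the stack. A toy illustration (not ClickHouse code) follows.

#include <cstddef>
#include <stdexcept>
#include <vector>

constexpr std::size_t MAX_NESTED_DEPTH = 256;  // stands in for DBMS_MAX_NESTED_FIELD_DEPTH

struct Node
{
    std::vector<Node> children;  // destroying a Node destroys its children recursively
};

/// Build a chain of `depth` nested nodes, refusing depths over the cap. Without
/// such a cap, destroying a chain hundreds of thousands of levels deep would
/// overflow the stack, because each level adds a destructor frame.
Node buildChain(std::size_t depth)
{
    if (depth >= MAX_NESTED_DEPTH)
        throw std::runtime_error("Too deep value");  // mirrors TOO_DEEP_RECURSION

    Node root;
    Node * current = &root;
    for (std::size_t i = 1; i < depth; ++i)
    {
        current->children.emplace_back();
        current = &current->children.back();
    }
    return root;
}

int main()
{
    Node ok = buildChain(200);   // accepted: within the cap
    (void)ok;
    // buildChain(100000) would throw instead of risking a stack overflow in ~Node().
    return 0;
}
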
@@ -671,6 +677,27 @@ private:
 
     Types::Which which;
 
+    /// Field may contain a Field inside in case when Field stores Array, Tuple, Map or Object.
+    /// As the result stack overflow on destruction is possible
+    /// and to avoid it we need to count the depth and have a threshold.
+    size_t nested_field_depth = 0;
+
+    /// Check whether T is already a Field with composite underlying type.
+    template <typename StorageType, typename Original>
+    size_t calculateAndCheckFieldDepth(Original && x)
+    {
+        size_t result = 0;
+
+        if constexpr (std::is_same_v<StorageType, Array> || std::is_same_v<StorageType, Tuple> || std::is_same_v<StorageType, Map>)
+            std::for_each(x.begin(), x.end(), [this, &x](auto & elem){ nested_field_depth = std::max(nested_field_depth, elem.nested_field_depth); });
+        else if constexpr (std::is_same_v<StorageType, Object>)
+            std::for_each(x.begin(), x.end(), [this, &x](auto & elem){ nested_field_depth = std::max(nested_field_depth, elem.second.nested_field_depth); });
+
+        if (result >= DBMS_MAX_NESTED_FIELD_DEPTH)
+            throw Exception(ErrorCodes::TOO_DEEP_RECURSION, "Too deep Field");
+
+        return result;
+    }
 
     /// Assuming there was no allocated state or it was deallocated (see destroy).
     template <typename T>

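The helper added above relies on each Field caching the depth of its own subtree in nested_field_depth, so building a new composite only has to look at its direct children (taking their maximum) rather than walking the whole tree, and anything at or beyond the threshold is rejected with TOO_DEEP_RECURSION. Below is a reduced sketch of that cached-depth idea, using illustrative names (Value, MAX_DEPTH) rather than the real Field interface.

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <utility>
#include <vector>

constexpr std::size_t MAX_DEPTH = 256;

/// Each value caches the depth of its own subtree, so computing the depth of a
/// new container only has to inspect its direct children.
struct Value
{
    std::vector<Value> children;
    std::size_t nested_depth = 0;

    static Value makeScalar() { return Value{}; }

    static Value makeArray(std::vector<Value> elems)
    {
        std::size_t child_depth = 0;
        for (const auto & e : elems)                    // O(direct children), not O(whole subtree)
            child_depth = std::max(child_depth, e.nested_depth);

        if (child_depth + 1 >= MAX_DEPTH)
            throw std::runtime_error("Too deep value"); // analogous to TOO_DEEP_RECURSION

        Value v;
        v.children = std::move(elems);
        v.nested_depth = child_depth + 1;
        return v;
    }
};

int main()
{
    std::vector<Value> elems;
    elems.push_back(Value::makeScalar());
    Value arr = Value::makeArray(std::move(elems));     // nested_depth == 1
    return arr.nested_depth == 1 ? 0 : 1;
}
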
@@ -686,6 +713,8 @@ private:
         using StorageType = NearestFieldType<UnqualifiedType>;
         new (&storage) StorageType(std::forward<T>(x));
         which = TypeToEnum<UnqualifiedType>::value;
+        /// Incrementing the depth since we create a new Field.
+        nested_field_depth = calculateAndCheckFieldDepth<StorageType>(x) + 1;
     }
 
     /// Assuming same types.

@@ -696,6 +725,8 @@ private:
         assert(which == TypeToEnum<JustT>::value);
         JustT * MAY_ALIAS ptr = reinterpret_cast<JustT *>(&storage);
         *ptr = std::forward<T>(x);
+        /// Do not increment the depth, because it is an assignment.
+        nested_field_depth = calculateAndCheckFieldDepth<JustT>(x);
     }
 
     template <typename CharT>

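The two hunks above distinguish creating a Field (placement new into raw storage, with the depth recomputed and incremented) from assigning over an existing Field of the same type (the stored object is reused and the cached depth is recomputed without the increment). Here is a reduced sketch of those two paths in a simplified single-type box; Box and cached_size are illustrative and unrelated to the real Field layout.

#include <cstddef>
#include <memory>
#include <new>
#include <string>
#include <utility>

struct Box
{
    alignas(std::string) unsigned char storage[sizeof(std::string)];
    bool initialized = false;
    std::size_t cached_size = 0;   // stands in for cached metadata such as nested_field_depth

    void create(std::string s)     // analogous to createConcrete
    {
        new (storage) std::string(std::move(s));        // construct into raw storage
        initialized = true;
        cached_size = reinterpret_cast<std::string *>(storage)->size();
    }

    void assign(std::string s)     // analogous to assignConcrete
    {
        auto * ptr = reinterpret_cast<std::string *>(storage);
        *ptr = std::move(s);                            // the object is reused, not re-created
        cached_size = ptr->size();                      // cached metadata still refreshed
    }

    ~Box()
    {
        if (initialized)
            std::destroy_at(reinterpret_cast<std::string *>(storage));
    }
};

int main()
{
    Box box;
    box.create("hello");
    box.assign("assigned over the existing string");
    return box.cached_size > 0 ? 0 : 1;
}
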
@@ -781,7 +812,7 @@ private:
     }
 
     template <typename T>
-    void destroy()
+    ALWAYS_INLINE void destroy()
     {
         T * MAY_ALIAS ptr = reinterpret_cast<T*>(&storage);
         ptr->~T();

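Marking destroy() as ALWAYS_INLINE plausibly trims one call frame per destroyed value, which matters here because composite Fields are destroyed one nesting level at a time; together with the depth cap this keeps worst-case stack use bounded. A sketch of the forced-inline idiom follows, using a local ALWAYS_INLINE_SKETCH macro because the exact definition of ALWAYS_INLINE in the codebase is not shown in this diff.

#include <new>

#if defined(__GNUC__) || defined(__clang__)
#    define ALWAYS_INLINE_SKETCH __attribute__((__always_inline__)) inline
#else
#    define ALWAYS_INLINE_SKETCH inline
#endif

struct Widget
{
    int * counter;
    ~Widget() { ++*counter; }
};

/// Forcing inlining removes one call frame per destroyed object, which matters
/// when destruction happens once per nesting level of a recursive value.
template <typename T>
ALWAYS_INLINE_SKETCH void destroy_in_place(void * storage)
{
    reinterpret_cast<T *>(storage)->~T();
}

int main()
{
    int destroyed = 0;
    alignas(Widget) unsigned char storage[sizeof(Widget)];
    new (storage) Widget{&destroyed};
    destroy_in_place<Widget>(storage);
    return destroyed == 1 ? 0 : 1;
}
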
@@ -62,7 +62,7 @@ DataTypePtr DataTypeFactory::getImpl(const String & full_name) const
     }
     else
     {
-        ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", false, data_type_max_parse_depth);
+        ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", DBMS_DEFAULT_MAX_QUERY_SIZE, data_type_max_parse_depth);
     }
 
     return getImpl<nullptr_on_error>(ast);

@@ -1449,8 +1449,10 @@ struct Transformer
             if constexpr (std::is_same_v<Additions, DateTimeAccurateConvertStrategyAdditions>
                 || std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)
             {
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
                 bool is_valid_input = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL;
+# pragma clang diagnostic pop
                 if (!is_valid_input)
                 {
                     if constexpr (std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)

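The pragmas added above silence clang's -Wimplicit-const-int-float-conversion for a single comparison, presumably because vec_from holds floating-point values there and 0xFFFFFFFF is not exactly representable as a 32-bit float; the push/pop pair keeps the suppression scoped to that one line. A minimal clang-only reproduction of the warning and of the same suppression pattern:

/// 0xFFFFFFFF cannot be represented exactly as float, so the comparison below
/// triggers -Wimplicit-const-int-float-conversion on clang; the conversion is
/// accepted deliberately, so the warning is silenced only around this line.
bool fits_in_uint32_range(float value)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
    bool is_valid_input = value >= 0 && value <= 0xFFFFFFFFL;
#pragma clang diagnostic pop
    return is_valid_input;
}

int main()
{
    return fits_in_uint32_range(42.0f) ? 0 : 1;
}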