Mirror of https://github.com/ClickHouse/ClickHouse.git
Avoid too large stack frames
This commit is contained in:
parent 42acb627fb
commit 475af33319
@@ -20,6 +20,9 @@ endif ()

 option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON)

+# Control maximum size of stack frames. It can be important if the code is run in fibers with small stack size.
+add_warning(frame-larger-than=32768)
+
 if (COMPILER_CLANG)
     add_warning(pedantic)
     no_warning(vla-extension)
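For context on the new flag: -Wframe-larger-than=<bytes> makes GCC and Clang warn about any function whose stack frame exceeds the given limit, and the project's add_warning helper turns the line above into that compiler flag when the compiler supports it. A minimal, hypothetical example (not part of this commit) of the kind of function the warning is meant to catch:

// Compile with: clang++ -Wframe-larger-than=32768 -c frame_demo.cpp
#include <cstddef>

int sumOfLargeBuffer()
{
    volatile char buf[64 * 1024];   /// 64 KB of locals: the frame exceeds the 32768-byte limit,
                                    /// so the compiler reports this function.
    for (size_t i = 0; i < sizeof(buf); ++i)
        buf[i] = 1;                 /// volatile keeps the array from being optimized away.
    int sum = 0;
    for (size_t i = 0; i < sizeof(buf); ++i)
        sum += buf[i];
    return sum;
}

The rest of the commit removes exactly this kind of large local storage (hash tables, ngram maps, I/O buffers) from code that may run on small fiber stacks.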
@@ -149,7 +149,7 @@ static void getNotEnoughMemoryMessage(std::string & msg)
 #if defined(__linux__)
     try
     {
-        static constexpr size_t buf_size = 4096;
+        static constexpr size_t buf_size = 1024;
         char buf[buf_size];

         UInt64 max_map_count = 0;
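A hedged sketch of what this buffer serves, assuming (as the max_map_count context above suggests) that the function reads a small value from procfs; the path and parsing below are illustrative, not the exact ClickHouse code. Such values fit in far fewer than 1024 bytes, so shrinking the buffer trims the frame without changing behaviour:

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static uint64_t readMaxMapCount()
{
    static constexpr size_t buf_size = 1024;  /// Small procfs values fit easily; 4096 bytes of stack was overkill.
    char buf[buf_size]{};
    uint64_t max_map_count = 0;

    if (FILE * f = std::fopen("/proc/sys/vm/max_map_count", "r"))
    {
        if (std::fgets(buf, buf_size, f))
            std::sscanf(buf, "%" SCNu64, &max_map_count);
        std::fclose(f);
    }
    return max_map_count;
}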
@@ -318,7 +318,7 @@ protected:

     /** max needle length is 255, max distinct ngrams for case-sensitive is (255 - 1), case-insensitive is 4 * (255 - 1)
       * storage of 64K ngrams (n = 2, 128 KB) should be large enough for both cases */
-    VolnitskyTraits::Offset hash[VolnitskyTraits::hash_size]; /// Hash table.
+    std::unique_ptr<VolnitskyTraits::Offset[]> hash; /// Hash table.

     const bool fallback; /// Do we need to use the fallback algorithm.
@@ -340,7 +340,7 @@ public:
         if (fallback)
             return;

-        memset(hash, 0, sizeof(hash));
+        hash = std::unique_ptr<VolnitskyTraits::Offset[]>(new VolnitskyTraits::Offset[VolnitskyTraits::hash_size]{});

         auto callback = [this](const VolnitskyTraits::Ngram ngram, const int offset) { return this->putNGramBase(ngram, offset); };
         /// ssize_t is used here because unsigned can't be used with condition like `i >= 0`, unsigned always >= 0
@@ -419,7 +419,7 @@ private:
         VolnitskyTraits::Offset off;
     };

-    OffsetId hash[VolnitskyTraits::hash_size];
+    std::unique_ptr<OffsetId[]> hash;

     /// step for each bunch of strings
     size_t step;
@@ -434,6 +434,7 @@ public:
     MultiVolnitskyBase(const std::vector<StringRef> & needles_) : needles{needles_}, step{0}, last{0}
     {
         fallback_searchers.reserve(needles.size());
+        hash = std::unique_ptr<OffsetId[]>(new OffsetId[VolnitskyTraits::hash_size]); /// No zero initialization, it will be done later.
     }

     /**
@@ -454,7 +455,7 @@ public:
         if (last == needles.size())
             return false;

-        memset(hash, 0, sizeof(hash));
+        memset(hash.get(), 0, VolnitskyTraits::hash_size * sizeof(OffsetId));
         fallback_needles.clear();
         step = std::numeric_limits<size_t>::max();
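All of the Volnitsky changes above follow one pattern: a large fixed-size member array (a hash table of VolnitskyTraits::hash_size entries, roughly 128 KB) moves out of the object, which is frequently created on the stack, into heap storage behind std::unique_ptr<T[]>. A condensed before/after sketch, with the ClickHouse types stubbed out for illustration:

#include <cstdint>
#include <cstring>
#include <memory>

using Offset = uint16_t;                        /// Stand-in for VolnitskyTraits::Offset.
static constexpr size_t hash_size = 64 * 1024;  /// Stand-in for VolnitskyTraits::hash_size.

struct SearcherBefore
{
    Offset hash[hash_size];                     /// The whole table lives inside the object (and thus on the stack).
    SearcherBefore() { std::memset(hash, 0, sizeof(hash)); }
};

struct SearcherAfter
{
    std::unique_ptr<Offset[]> hash;             /// Only a pointer lives in the object; the table is on the heap.
    SearcherAfter() : hash(new Offset[hash_size]{}) {}  /// The trailing {} value-initializes, replacing the memset.
};

Where the table has to be re-zeroed later, as in the multi-needle reset above, sizeof(hash) would now only measure the pointer, hence the explicit memset(hash.get(), 0, VolnitskyTraits::hash_size * sizeof(OffsetId)).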
@@ -79,8 +79,8 @@ String getFilesystemName([[maybe_unused]] const String & mount_point)
         throw DB::Exception("Cannot open /etc/mtab to get name of filesystem", ErrorCodes::SYSTEM_ERROR);
     mntent fs_info;
     constexpr size_t buf_size = 4096; /// The same as buffer used for getmntent in glibc. It can happen that it's not enough
-    char buf[buf_size];
-    while (getmntent_r(mounted_filesystems, &fs_info, buf, buf_size) && fs_info.mnt_dir != mount_point)
+    std::vector<char> buf(buf_size);
+    while (getmntent_r(mounted_filesystems, &fs_info, buf.data(), buf_size) && fs_info.mnt_dir != mount_point)
         ;
     endmntent(mounted_filesystems);
     if (fs_info.mnt_dir != mount_point)
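Same idea for the mount-table lookup: the 4 KB scratch buffer required by glibc's getmntent_r moves from the stack to a std::vector<char>. A self-contained sketch of the call pattern (restructured for clarity; not the exact ClickHouse function):

#include <mntent.h>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

std::string filesystemNameForMountPoint(const std::string & mount_point)
{
    FILE * mounted_filesystems = setmntent("/etc/mtab", "r");
    if (!mounted_filesystems)
        return {};

    mntent fs_info{};
    constexpr size_t buf_size = 4096;   /// Same size as before, just heap-allocated now.
    std::vector<char> buf(buf_size);

    std::string name;
    while (getmntent_r(mounted_filesystems, &fs_info, buf.data(), buf_size))
    {
        if (fs_info.mnt_dir == mount_point)
        {
            name = fs_info.mnt_fsname;
            break;
        }
    }
    endmntent(mounted_filesystems);
    return name;
}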
@@ -234,8 +234,7 @@ void DatabaseOrdinary::alterTable(
     String statement;

     {
-        char in_buf[METADATA_FILE_BUFFER_SIZE];
-        ReadBufferFromFile in(table_metadata_path, METADATA_FILE_BUFFER_SIZE, -1, in_buf);
+        ReadBufferFromFile in(table_metadata_path, METADATA_FILE_BUFFER_SIZE);
         readStringUntilEOF(statement, in);
     }
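Here the fix is simply to stop passing an external buffer: judging from the two call sites, the trailing arguments of the ReadBufferFromFile constructor are a flags value and an optional existing-memory pointer, and when they are omitted the METADATA_FILE_BUFFER_SIZE-byte buffer is allocated inside the ReadBuffer itself rather than in alterTable's stack frame. (Signature inferred from the diff, not checked against the headers.)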
@@ -48,11 +48,11 @@ struct NgramDistanceImpl
     /// Max codepoints to store at once. 16 is for batching usage and PODArray has this padding.
     static constexpr size_t simultaneously_codepoints_num = default_padding + N - 1;

-    /** This fits mostly in L2 cache all the time.
+    /** map_size of this fits mostly in L2 cache all the time.
      * Actually use UInt16 as addings and subtractions do not UB overflow. But think of it as a signed
      * integer array.
      */
-    using NgramStats = UInt16[map_size];
+    using NgramCount = UInt16;

     static ALWAYS_INLINE UInt16 calculateASCIIHash(const CodePoint * code_points)
     {
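The type change above drives the rest of the NgramDistanceImpl edits: the old alias was a whole array type, so NgramStats common_stats = {}; put map_size UInt16 counters (about 128 KB) directly in the caller's frame, while the new alias names only the element type and every caller allocates the counters on the heap. A minimal sketch of the replacement pattern, with map_size as an assumed placeholder value:

#include <cstddef>
#include <cstdint>
#include <memory>

using NgramCount = uint16_t;                 /// Element type only; no fixed-size array alias any more.
static constexpr size_t map_size = 1 << 16;  /// Illustrative: 64K counters, about 128 KB.

void computeDistanceSketch()
{
    /// Before: NgramStats common_stats = {};   // ~128 KB directly in this stack frame.
    /// After: the same zero-initialized storage on the heap.
    std::unique_ptr<NgramCount[]> common_stats{new NgramCount[map_size]{}};

    /// Callers now pass the raw pointer, e.g. calculateNeedleStats(..., common_stats.get(), ...).
    (void)common_stats;
}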
@@ -169,8 +169,8 @@ struct NgramDistanceImpl
     static ALWAYS_INLINE inline size_t calculateNeedleStats(
         const char * data,
         const size_t size,
-        NgramStats & ngram_stats,
-        [[maybe_unused]] UInt16 * ngram_storage,
+        NgramCount * ngram_stats,
+        [[maybe_unused]] NgramCount * ngram_storage,
         size_t (*read_code_points)(CodePoint *, const char *&, const char *),
         UInt16 (*hash_functor)(const CodePoint *))
     {
@@ -202,7 +202,7 @@ struct NgramDistanceImpl
     static ALWAYS_INLINE inline UInt64 calculateHaystackStatsAndMetric(
         const char * data,
         const size_t size,
-        NgramStats & ngram_stats,
+        NgramCount * ngram_stats,
         size_t & distance,
         [[maybe_unused]] UInt16 * ngram_storage,
         size_t (*read_code_points)(CodePoint *, const char *&, const char *),
@@ -256,7 +256,7 @@ struct NgramDistanceImpl

     static void constantConstant(std::string data, std::string needle, Float32 & res)
     {
-        NgramStats common_stats = {};
+        std::unique_ptr<NgramCount[]> common_stats{new NgramCount[map_size]{}};

         /// We use unsafe versions of getting ngrams, so I decided to use padded strings.
         const size_t needle_size = needle.size();
@@ -264,11 +264,11 @@ struct NgramDistanceImpl
         needle.resize(needle_size + default_padding);
         data.resize(data_size + default_padding);

-        size_t second_size = dispatchSearcher(calculateNeedleStats<false>, needle.data(), needle_size, common_stats, nullptr);
+        size_t second_size = dispatchSearcher(calculateNeedleStats<false>, needle.data(), needle_size, common_stats.get(), nullptr);
         size_t distance = second_size;
         if (data_size <= max_string_size)
         {
-            size_t first_size = dispatchSearcher(calculateHaystackStatsAndMetric<false>, data.data(), data_size, common_stats, distance, nullptr);
+            size_t first_size = dispatchSearcher(calculateHaystackStatsAndMetric<false>, data.data(), data_size, common_stats.get(), distance, nullptr);
             /// For !symmetric version we should not use first_size.
             if constexpr (symmetric)
                 res = distance * 1.f / std::max(first_size + second_size, size_t(1));
@@ -295,7 +295,7 @@ struct NgramDistanceImpl
         size_t prev_haystack_offset = 0;
         size_t prev_needle_offset = 0;

-        NgramStats common_stats = {};
+        std::unique_ptr<NgramCount[]> common_stats{new NgramCount[map_size]{}};

         /// The main motivation is to not allocate more on stack because we have already allocated a lot (128Kb).
         /// And we can reuse these storages in one thread because we care only about what was written to first places.
@@ -316,7 +316,7 @@ struct NgramDistanceImpl
                 calculateNeedleStats<true>,
                 needle,
                 needle_size,
-                common_stats,
+                common_stats.get(),
                 needle_ngram_storage.get());

             size_t distance = needle_stats_size;
@@ -326,7 +326,7 @@ struct NgramDistanceImpl
                 calculateHaystackStatsAndMetric<true>,
                 haystack,
                 haystack_size,
-                common_stats,
+                common_stats.get(),
                 distance,
                 haystack_ngram_storage.get());

@@ -378,7 +378,7 @@ struct NgramDistanceImpl
         const size_t needle_offsets_size = needle_offsets.size();
         size_t prev_offset = 0;

-        NgramStats common_stats = {};
+        std::unique_ptr<NgramCount[]> common_stats{new NgramCount[map_size]{}};

         std::unique_ptr<UInt16[]> needle_ngram_storage(new UInt16[max_string_size]);
         std::unique_ptr<UInt16[]> haystack_ngram_storage(new UInt16[max_string_size]);
@@ -394,7 +394,7 @@ struct NgramDistanceImpl
                 calculateNeedleStats<true>,
                 needle,
                 needle_size,
-                common_stats,
+                common_stats.get(),
                 needle_ngram_storage.get());

             size_t distance = needle_stats_size;
@@ -403,7 +403,7 @@ struct NgramDistanceImpl
                 calculateHaystackStatsAndMetric<true>,
                 haystack.data(),
                 haystack_size,
-                common_stats,
+                common_stats.get(),
                 distance,
                 haystack_ngram_storage.get());

@@ -430,17 +430,16 @@ struct NgramDistanceImpl
         PaddedPODArray<Float32> & res)
     {
-        /// zeroing our map
-        NgramStats common_stats = {};
+        std::unique_ptr<NgramCount[]> common_stats{new NgramCount[map_size]{}};

-        /// The main motivation is to not allocate more on stack because we have already allocated a lot (128Kb).
-        /// And we can reuse these storages in one thread because we care only about what was written to first places.
-        std::unique_ptr<UInt16[]> ngram_storage(new UInt16[max_string_size]);
+        /// We can reuse these storages in one thread because we care only about what was written to first places.
+        std::unique_ptr<UInt16[]> ngram_storage(new NgramCount[max_string_size]);

         /// We use unsafe versions of getting ngrams, so I decided to use padded_data even in needle case.
         const size_t needle_size = needle.size();
         needle.resize(needle_size + default_padding);

-        const size_t needle_stats_size = dispatchSearcher(calculateNeedleStats<false>, needle.data(), needle_size, common_stats, nullptr);
+        const size_t needle_stats_size = dispatchSearcher(calculateNeedleStats<false>, needle.data(), needle_size, common_stats.get(), nullptr);

         size_t distance = needle_stats_size;
         size_t prev_offset = 0;
@@ -453,7 +452,7 @@ struct NgramDistanceImpl
             size_t haystack_stats_size = dispatchSearcher(
                 calculateHaystackStatsAndMetric<true>,
                 reinterpret_cast<const char *>(haystack),
-                haystack_size, common_stats,
+                haystack_size, common_stats.get(),
                 distance,
                 ngram_storage.get());
             /// For !symmetric version we should not use haystack_stats_size.
@@ -42,12 +42,12 @@ void AIOContextPool::doMonitor()
 void AIOContextPool::waitForCompletion()
 {
     /// array to hold completion events
-    io_event events[max_concurrent_events];
+    std::vector<io_event> events(max_concurrent_events);

     try
     {
-        const auto num_events = getCompletionEvents(events, max_concurrent_events);
-        fulfillPromises(events, num_events);
+        const auto num_events = getCompletionEvents(events.data(), max_concurrent_events);
+        fulfillPromises(events.data(), num_events);
         notifyProducers(num_events);
     }
     catch (...)
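And the same treatment for the Linux AIO completion loop: the io_event array becomes a std::vector, and the existing helpers receive events.data() instead of the array name (both yield an io_event * pointer). A rough sketch under the assumption that the helpers take a pointer plus a count, with stub bodies standing in for the pool's private methods:

#include <linux/aio_abi.h>   /// Provides struct io_event on Linux; header choice is an assumption for this sketch.
#include <cstddef>
#include <vector>

/// Hypothetical stand-ins; signatures inferred from the call sites above.
static size_t getCompletionEvents(io_event *, size_t) { return 0; }
static void fulfillPromises(const io_event *, size_t) {}
static void notifyProducers(size_t) {}

void waitForCompletionSketch(size_t max_concurrent_events)
{
    /// Heap-backed storage instead of `io_event events[max_concurrent_events];` on the stack.
    std::vector<io_event> events(max_concurrent_events);

    const auto num_events = getCompletionEvents(events.data(), max_concurrent_events);
    fulfillPromises(events.data(), num_events);
    notifyProducers(num_events);
}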