BLI: reduce wasted memory in linear allocator

The main change is that large allocations are done separately now.
Also, buffers that small allocations are packed into now have a maximum
size. Using larger buffers does not really provide performance
benefits, but increases wasted memory.
This commit is contained in:
2021-03-07 14:15:20 +01:00
parent 84da76a96c
commit 456d3cc85e
2 changed files with 49 additions and 13 deletions

View File

@@ -38,18 +38,20 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
uintptr_t current_begin_; uintptr_t current_begin_;
uintptr_t current_end_; uintptr_t current_end_;
int64_t next_min_alloc_size_;
#ifdef DEBUG #ifdef DEBUG
int64_t debug_allocated_amount_ = 0; int64_t debug_allocated_amount_ = 0;
#endif #endif
/* Buffers larger than that are not packed together with smaller allocations to avoid wasting
* memory. */
constexpr static inline int64_t large_buffer_threshold = 4096;
public: public:
LinearAllocator() LinearAllocator()
{ {
current_begin_ = 0; current_begin_ = 0;
current_end_ = 0; current_end_ = 0;
next_min_alloc_size_ = 64;
} }
~LinearAllocator() ~LinearAllocator()
@@ -71,23 +73,23 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
BLI_assert(alignment >= 1); BLI_assert(alignment >= 1);
BLI_assert(is_power_of_2_i(alignment)); BLI_assert(is_power_of_2_i(alignment));
#ifdef DEBUG
debug_allocated_amount_ += size;
#endif
const uintptr_t alignment_mask = alignment - 1; const uintptr_t alignment_mask = alignment - 1;
const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) & const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) &
~alignment_mask; ~alignment_mask;
const uintptr_t potential_allocation_end = potential_allocation_begin + size; const uintptr_t potential_allocation_end = potential_allocation_begin + size;
if (potential_allocation_end <= current_end_) { if (potential_allocation_end <= current_end_) {
#ifdef DEBUG
debug_allocated_amount_ += size;
#endif
current_begin_ = potential_allocation_end; current_begin_ = potential_allocation_end;
return reinterpret_cast<void *>(potential_allocation_begin); return reinterpret_cast<void *>(potential_allocation_begin);
} }
else { if (size <= large_buffer_threshold) {
this->allocate_new_buffer(size + alignment); this->allocate_new_buffer(size + alignment, alignment);
return this->allocate(size, alignment); return this->allocate(size, alignment);
} }
return this->allocator_large_buffer(size, alignment);
}; };
/** /**
@@ -195,7 +197,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
} }
private: private:
void allocate_new_buffer(int64_t min_allocation_size) void allocate_new_buffer(int64_t min_allocation_size, int64_t min_alignment)
{ {
for (int64_t i : unused_borrowed_buffers_.index_range()) { for (int64_t i : unused_borrowed_buffers_.index_range()) {
Span<char> buffer = unused_borrowed_buffers_[i]; Span<char> buffer = unused_borrowed_buffers_[i];
@@ -207,15 +209,28 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
} }
} }
const int64_t size_in_bytes = power_of_2_min_u( /* Possibly allocate more bytes than necessary for the current allocation. This way more small
std::max(min_allocation_size, next_min_alloc_size_)); * allocations can be packed together. Large buffers are allocated exactly to avoid wasting too
next_min_alloc_size_ = size_in_bytes * 2; * much memory. */
int64_t size_in_bytes = min_allocation_size;
if (size_in_bytes <= large_buffer_threshold) {
/* Gradually grow buffer size with each allocation, up to a maximum. */
const int64_t grow_size = 1 << std::min<int64_t>(owned_buffers_.size() + 6, 20);
size_in_bytes = std::min(large_buffer_threshold, std::max(size_in_bytes, grow_size));
}
void *buffer = allocator_.allocate(size_in_bytes, 8, AT); void *buffer = allocator_.allocate(size_in_bytes, min_alignment, __func__);
owned_buffers_.append(buffer); owned_buffers_.append(buffer);
current_begin_ = (uintptr_t)buffer; current_begin_ = (uintptr_t)buffer;
current_end_ = current_begin_ + size_in_bytes; current_end_ = current_begin_ + size_in_bytes;
} }
void *allocator_large_buffer(const int64_t size, const int64_t alignment)
{
void *buffer = allocator_.allocate(size, alignment, __func__);
owned_buffers_.append(buffer);
return buffer;
}
}; };
} // namespace blender } // namespace blender

View File

@@ -1,6 +1,7 @@
/* Apache License, Version 2.0 */ /* Apache License, Version 2.0 */
#include "BLI_linear_allocator.hh" #include "BLI_linear_allocator.hh"
#include "BLI_rand.hh"
#include "BLI_strict_flags.h" #include "BLI_strict_flags.h"
#include "testing/testing.h" #include "testing/testing.h"
@@ -115,4 +116,24 @@ TEST(linear_allocator, ConstructArrayCopy)
EXPECT_EQ(span2[2], 3); EXPECT_EQ(span2[2], 3);
} }
TEST(linear_allocator, AllocateLarge)
{
LinearAllocator<> allocator;
void *buffer1 = allocator.allocate(1024 * 1024, 8);
void *buffer2 = allocator.allocate(1024 * 1024, 8);
EXPECT_NE(buffer1, buffer2);
}
TEST(linear_allocator, ManyAllocations)
{
LinearAllocator<> allocator;
RandomNumberGenerator rng;
for (int i = 0; i < 1000; i++) {
int size = rng.get_int32(10000);
int alignment = 1 << (rng.get_int32(7));
void *buffer = allocator.allocate(size, alignment);
EXPECT_NE(buffer, nullptr);
}
}
} // namespace blender::tests } // namespace blender::tests