From 0e10b71cbf20e70819ceba99482da3b9a5daac7c Mon Sep 17 00:00:00 2001 From: LowByteFox Date: Mon, 4 Aug 2025 14:54:26 +0200 Subject: [PATCH] Stdlib: SingleSizeObjectPool implementation (#2360) * implement working single size object pool --------- Co-authored-by: Christoffer Lerno --- lib/std/core/mem_allocator.c3 | 2 +- lib/std/core/mem_mempool.c3 | 234 +++++++++++++++++++++++++ releasenotes.md | 1 + test/unit/stdlib/core/mem_blockpool.c3 | 125 +++++++++++++ 4 files changed, 361 insertions(+), 1 deletion(-) create mode 100644 lib/std/core/mem_mempool.c3 create mode 100644 test/unit/stdlib/core/mem_blockpool.c3 diff --git a/lib/std/core/mem_allocator.c3 b/lib/std/core/mem_allocator.c3 index 699777ee3..f410effec 100644 --- a/lib/std/core/mem_allocator.c3 +++ b/lib/std/core/mem_allocator.c3 @@ -65,7 +65,7 @@ alias MemoryAllocFn = fn char[]?(usz); -fn usz alignment_for_allocation(usz alignment) @inline @private +fn usz alignment_for_allocation(usz alignment) @inline { return alignment < mem::DEFAULT_MEM_ALIGNMENT ? 
mem::DEFAULT_MEM_ALIGNMENT : alignment; } diff --git a/lib/std/core/mem_mempool.c3 b/lib/std/core/mem_mempool.c3 new file mode 100644 index 000000000..befc1f645 --- /dev/null +++ b/lib/std/core/mem_mempool.c3 @@ -0,0 +1,234 @@ +module std::core::mem::mempool; +import std::core::mem, std::core::mem::allocator, std::math; + +const INITIAL_CAPACITY = 0; + +struct FixedBlockPoolNode +{ + void* buffer; + FixedBlockPoolNode *next; + usz capacity; +} + +struct FixedBlockPoolEntry +{ + void *previous; +} + +<* + Fixed-size block pool pre-allocating blocks backed by an Allocator, which are then reserved for the user; + blocks deallocated by the user are later re-used by future block allocations + + `grow_capacity` can be changed in order to affect how many blocks will be allocated by the next pool allocation, + it has to be greater than 0 + `allocated` number of allocated blocks + `used` number of used blocks by the user +*> +struct FixedBlockPool +{ + Allocator allocator; + FixedBlockPoolNode head; + FixedBlockPoolNode *tail; + void *next_free; + void *freelist; + usz block_size; + usz grow_capacity; + usz allocated; + usz page_size; + usz alignment; + usz used; + bool initialized; +} + +<* + Initialize a block pool + + @param [in] allocator : "The allocator to use" + @param block_size : "The block size to use" + @param capacity : "The amount of blocks to be pre-allocated" + @param alignment : "The alignment of the buffer" + @require !alignment || math::is_power_of_2(alignment) + @require !self.initialized : "The block pool must not be initialized" + @require block_size > 0 : "Block size must be non zero" + @require calculate_actual_capacity(capacity, block_size) * block_size >= block_size + : "Total memory would overflow" +*> +macro FixedBlockPool* FixedBlockPool.init(&self, Allocator allocator, usz block_size, usz capacity = INITIAL_CAPACITY, usz alignment = 0) +{ + self.allocator = allocator; + self.tail = &self.head; + self.head.next = null; + self.block_size =
math::max(block_size, FixedBlockPoolEntry.sizeof); + capacity = calculate_actual_capacity(capacity, self.block_size); + self.alignment = allocator::alignment_for_allocation(alignment); + self.page_size = capacity * self.block_size; + assert(self.page_size >= self.block_size, "Total memory would overflow %d %d", block_size, capacity); + self.head.buffer = self.allocate_page(); + self.head.capacity = capacity; + self.next_free = self.head.buffer; + self.freelist = null; + self.grow_capacity = capacity; + self.initialized = true; + self.allocated = capacity; + self.used = 0; + return self; +} + +<* + Initialize a block pool + + @param [in] allocator : "The allocator to use" + @param $Type : "The type used for setting the block size" + @param capacity : "The amount of blocks to be pre-allocated" + @require !self.initialized : "The block pool must not be initialized" +*> +macro FixedBlockPool* FixedBlockPool.init_for_type(&self, Allocator allocator, $Type, usz capacity = INITIAL_CAPACITY) +{ + return self.init(allocator, $Type.sizeof, capacity, $Type.alignof); +} + +<* + Initialize a block pool using the temporary allocator + + @param $Type : "The type used for setting the block size" + @param capacity : "The amount of blocks to be pre-allocated" + @require !self.initialized : "The block pool must not be initialized" +*> +macro FixedBlockPool* FixedBlockPool.tinit_for_type(&self, $Type, usz capacity = INITIAL_CAPACITY) => self.init_for_type(tmem, $Type, capacity); + +<* + Initialize a block pool using the temporary allocator + + @param block_size : "The block size to use" + @param capacity : "The amount of blocks to be pre-allocated" + @require !self.initialized : "The block pool must not be initialized" +*> +macro FixedBlockPool* FixedBlockPool.tinit(&self, usz block_size, usz capacity = INITIAL_CAPACITY) => self.init(tmem, block_size, capacity); + +<* + Free up the entire block pool + + @require self.initialized : "The block pool must be initialized" +*> +fn void
FixedBlockPool.free(&self) +{ + self.free_page(self.head.buffer); + FixedBlockPoolNode* iter = self.head.next; + + while (iter) + { + self.free_page(iter.buffer); + FixedBlockPoolNode* current = iter; + iter = iter.next; + allocator::free(self.allocator, current); + } + self.initialized = false; + self.allocated = 0; + self.used = 0; +} + +<* + Allocate a block on the block pool, re-uses previously deallocated blocks + + @require self.initialized : "The block pool must be initialized" +*> +fn void* FixedBlockPool.alloc(&self) +{ + defer self.used++; + + if (self.freelist) + { + FixedBlockPoolEntry* entry = self.freelist; + self.freelist = entry.previous; + mem::clear(entry, self.block_size); + return entry; + } + + void* end = self.tail.buffer + (self.tail.capacity * self.block_size); + if (self.next_free >= end) self.new_node(); + void* ptr = self.next_free; + self.next_free += self.block_size; + + return ptr; +} + +<* + Deallocate a block from the block pool + + @require self.initialized : "The block pool must be initialized" + @require self.check_ptr(ptr) : "The pointer should be part of the pool" +*> +fn void FixedBlockPool.dealloc(&self, void* ptr) +{ + $if env::COMPILER_SAFE_MODE && !env::ADDRESS_SANITIZER: + if (self.block_size > FixedBlockPoolEntry.sizeof) + { + mem::set(ptr + FixedBlockPoolEntry.sizeof, 0xAA, self.block_size - FixedBlockPoolEntry.sizeof); + } + $else + // POINT FOR IMPROVEMENT, something like: + // asan::poison_memory_region(&ptr, self.block_size); + $endif + + FixedBlockPoolEntry* entry = ptr; + entry.previous = self.freelist; + self.freelist = entry; + self.used--; +} + +<* + @require self.initialized : "The block pool must be initialized" +*> +fn bool FixedBlockPool.check_ptr(&self, void *ptr) @local +{ + FixedBlockPoolNode* iter = &self.head; + + while (iter) + { + void* end = iter.buffer + (iter.capacity * self.block_size); + if (ptr >= iter.buffer && ptr < end) return true; + iter = iter.next; + } + + return false; +} + +<* + @require self.grow_capacity > 0 :
"Grow capacity must be greater than 0" +*> +fn void FixedBlockPool.new_node(&self) @local +{ + FixedBlockPoolNode* node = allocator::new(self.allocator, FixedBlockPoolNode); + node.buffer = self.allocate_page(); + node.capacity = self.grow_capacity; + self.tail.next = node; + self.tail = node; + self.next_free = node.buffer; + self.allocated += node.capacity; +} + +macro void* FixedBlockPool.allocate_page(&self) @private +{ + return self.alignment > mem::DEFAULT_MEM_ALIGNMENT + ? allocator::calloc_aligned(self.allocator, self.page_size, self.alignment)!! + : allocator::calloc(self.allocator, self.page_size); +} +macro void FixedBlockPool.free_page(&self, void* page) @private +{ + if (self.alignment > mem::DEFAULT_MEM_ALIGNMENT) + { + allocator::free_aligned(self.allocator, page); + } + else + { + allocator::free(self.allocator, page); + } +} + +macro usz calculate_actual_capacity(usz capacity, usz block_size) @private +{ + // Assume some overhead + if (capacity) return capacity; + capacity = (mem::os_pagesize() - 128) / block_size; + return capacity ?: 1; +} diff --git a/releasenotes.md b/releasenotes.md index 99828c052..33b0a51de 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -21,6 +21,7 @@ - Added `FileMmap` to manage memory mapped files. - Add `vm::mmap_file` to memory map a file. - Updated hash functions in default hash methods. +- Added `FixedBlockPool` which is a memory pool for fixed size blocks.
## 0.7.4 Change list diff --git a/test/unit/stdlib/core/mem_blockpool.c3 b/test/unit/stdlib/core/mem_blockpool.c3 new file mode 100644 index 000000000..342e2a575 --- /dev/null +++ b/test/unit/stdlib/core/mem_blockpool.c3 @@ -0,0 +1,125 @@ +module std::core::fixedblockpool_test; + +import std; + +struct Foo +{ + int a; + double b; + bool c; + char[2] x; +} + +fn void test_basic_allocation() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, int); + + test::eq(pool.used, 0); + int *ptr1 = pool.alloc(); + test::eq(pool.used, 1); + int *ptr2 = pool.alloc(); + assert(((usz) ptr2) - ((usz) ptr1) >= int.sizeof); + assert(pool.used == 2); +} + +struct Bar @align(128) +{ + int a; + int b; +} +fn void test_basic_overallocation() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, Bar); + assert(pool.alignment == 128); + test::eq(pool.used, 0); + int *ptr1 = pool.alloc(); + test::eq(((iptr)ptr1) % 128, 0); + test::eq(pool.used, 1); + int *ptr2 = pool.alloc(); + test::eq(((iptr)ptr2) % 128, 0); + test::eq(pool.used, 2); +} + +fn void test_large_allocation() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, Foo); + + Foo *ptr1 = pool.alloc(); + Foo *ptr2 = pool.alloc(); + assert(((usz) ptr2) - ((usz) ptr1) >= Foo.sizeof); + assert(pool.used == 2); + pool.dealloc(ptr1); + pool.dealloc(ptr2); +} + +fn void test_basic_capacity() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, int, 2); + + int *ptr1 = pool.alloc(); + int *ptr2 = pool.alloc(); + assert(((usz) ptr2) - ((usz) ptr1) >= int.sizeof); + assert(pool.used == 2); +} + +fn void test_basic_capacity_different_grow_capacity() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, int, 1); + + pool.alloc(); + pool.grow_capacity = 2; + + pool.alloc(); + pool.alloc(); + pool.grow_capacity = 1; + + pool.alloc(); + + assert(pool.head.buffer != null); + assert(pool.head.next.buffer != null); + 
assert(pool.head.next.next.buffer != null); + assert(pool.head.next.next.next == null); + assert(pool.used == 4); + assert(pool.allocated == 4); +} + +fn void test_basic_object_reuse() @test +{ + FixedBlockPool pool; + defer pool.free(); + pool.init_for_type(mem, int, 1); + + int*[10] objs; + for (int i = 0; i < 10; i++) objs[i] = pool.alloc(); + test::eq(pool.used, 10); + + int *obj1 = objs[1]; + pool.dealloc(obj1); + test::eq(pool.used, 9); + int *obj3 = objs[3]; + pool.dealloc(obj3); + test::eq(pool.used, 8); + + int *obj7 = objs[7]; + pool.dealloc(obj7); + + int *obj2 = objs[2]; + pool.dealloc(obj2); + + assert(obj2 == pool.alloc()); + assert(obj7 == pool.alloc()); + assert(obj3 == pool.alloc()); + test::eq(pool.used, 9); + assert(obj1 == pool.alloc()); + assert(pool.used == 10); +}