mirror of
https://github.com/c3lang/c3c.git
synced 2026-02-27 12:01:16 +00:00
Add simple heap allocator and update lambda and #lazy checking.
This commit is contained in:
219
lib/std/core/allocators/heap_allocator.c3
Normal file
219
lib/std/core/allocators/heap_allocator.c3
Normal file
@@ -0,0 +1,219 @@
|
||||
module std::core::mem::allocator;

// Signature of the backing memory provider used by SimpleHeapAllocator:
// requests a number of bytes and returns the allocated slice, or an
// error if the provider cannot supply more memory.
define MemoryAllocFn = fn char[]!(usz);
|
||||
|
||||
// A minimal free-list heap allocator that carves blocks out of memory
// obtained on demand from an externally supplied backing function.
struct SimpleHeapAllocator
{
	inline Allocator allocator;   // Embedded allocator interface; must stay first so the Allocator* can be cast back.
	MemoryAllocFn alloc_fn;       // Backing provider used to grow the heap (see add_block).
	Header* free_list;            // Singly linked list of free blocks; kept ordered by address for coalescing.
}
|
||||
|
||||
/**
 * Prepare the allocator for use with the given backing memory provider.
 *
 * @require this "Unexpectedly missing the allocator"
 * @require allocator "An underlying memory provider must be given"
 * @require !this.free_list "The allocator may not be already initialized"
 **/
fn void SimpleHeapAllocator.init(SimpleHeapAllocator* this, MemoryAllocFn allocator)
{
	// Start with an empty heap; the first allocation pulls memory from
	// the provider. All Allocator-interface calls are routed through
	// the shared dispatch function.
	this.free_list = null;
	this.allocator = { &simple_heap_allocator_function };
	this.alloc_fn = allocator;
}
|
||||
|
||||
/**
 * Allocator-interface entry point: dispatches each AllocationKind to the
 * corresponding SimpleHeapAllocator method, wrapping the aligned variants
 * in the shared @aligned_* helper macros.
 *
 * @require !alignment || math::is_power_of_2(alignment)
 * @require this `unexpectedly missing the allocator`
 */
private fn void*! simple_heap_allocator_function(Allocator* this, usz size, usz alignment, usz offset, void* old_pointer, AllocationKind kind)
{
	// The Allocator is the first (inline) member of SimpleHeapAllocator,
	// so the interface pointer doubles as a pointer to the full struct.
	SimpleHeapAllocator* heap = (SimpleHeapAllocator*)this;
	switch (kind)
	{
		case ALIGNED_ALLOC:
			return @aligned_alloc(heap._alloc, size, alignment, offset);
		case ALLOC:
			return heap._alloc(size);
		case ALIGNED_CALLOC:
			return @aligned_calloc(heap._calloc, size, alignment, offset);
		case CALLOC:
			return heap._calloc(size);
		case ALIGNED_REALLOC:
			// Degenerate reallocs collapse to a free or a fresh allocation.
			if (!size) nextcase ALIGNED_FREE;
			if (!old_pointer) nextcase ALIGNED_CALLOC;
			return @aligned_realloc(heap._calloc, heap._free, old_pointer, size, alignment, offset);
		case REALLOC:
			if (!size) nextcase FREE;
			if (!old_pointer) nextcase CALLOC;
			return heap._realloc(old_pointer, size);
		case RESET:
			// A free-list heap has no way to drop all outstanding blocks at once.
			return AllocationFailure.UNSUPPORTED_OPERATION!;
		case ALIGNED_FREE:
			@aligned_free(heap._free, old_pointer)?;
			return null;
		case FREE:
			heap._free(old_pointer);
			return null;
		default:
			unreachable();
	}
}
|
||||
|
||||
/**
 * Grow (or keep) an existing allocation, copying the payload into a
 * larger block when the current one is too small.
 *
 * @require this && old_pointer && bytes > 0
 **/
fn void*! SimpleHeapAllocator._realloc(SimpleHeapAllocator* this, void* old_pointer, usz bytes)
{
	// The block header sits immediately before the user data.
	Header* header = (Header*)old_pointer - 1;

	// Already big enough? Hand the same pointer straight back.
	if (header.size >= bytes) return old_pointer;

	// Otherwise grab a larger block, move the payload, release the old one.
	void* new_mem = this._alloc(bytes)?;
	mem::copy(new_mem, old_pointer, math::min(header.size, bytes));
	this._free(old_pointer);
	return new_mem;
}
|
||||
|
||||
// Allocate `bytes` of memory and zero it before returning it to the caller.
private fn void*! SimpleHeapAllocator._calloc(SimpleHeapAllocator* this, usz bytes)
{
	void* zeroed = this._alloc(bytes)?;
	mem::clear(zeroed, bytes, DEFAULT_MEM_ALIGNMENT);
	return zeroed;
}
|
||||
|
||||
// First-fit allocation from the address-ordered free list, growing the
// heap via the backing provider when nothing fits.
//
// Fixes vs. the original:
//  1. The split-off remainder's size was `current.size - aligned_bytes`,
//     which overstates the usable payload by Header.sizeof — the remainder
//     must also pay for its own header. (_free's merge arithmetic,
//     `size += other.size + Header.sizeof`, shows `size` excludes the
//     header.) The old value corrupts the heap once that block is used.
//  2. The retry after add_block called `this.alloc(...)`; the method is
//     named `_alloc`, so the retry must recurse into `this._alloc`.
private fn void*! SimpleHeapAllocator._alloc(SimpleHeapAllocator* this, usz bytes)
{
	// Round up so split/merge arithmetic always stays aligned.
	usz aligned_bytes = mem::aligned_offset(bytes, DEFAULT_MEM_ALIGNMENT);

	// Lazily obtain the first block from the backing provider.
	if (!this.free_list)
	{
		this.add_block(aligned_bytes)?;
	}

	Header* current = this.free_list;
	Header* previous = current;
	while (current)
	{
		switch
		{
			case current.size >= aligned_bytes && current.size <= aligned_bytes + Header.sizeof + 64:
				// Close-enough fit: splitting would leave a uselessly
				// small remainder, so hand out the whole block.
				if (current == previous)
				{
					this.free_list = current.next;
				}
				else
				{
					previous.next = current.next;
				}
				current.next = null;
				return current + 1;
			case current.size > aligned_bytes:
				// Plenty of room: carve the tail off into a new free block
				// placed right after the allocated payload.
				Header* unallocated = (Header*)((char*)current + aligned_bytes + Header.sizeof);
				// The remainder loses both the carved-out payload and the
				// new block's own header (fix #1).
				unallocated.size = current.size - aligned_bytes - Header.sizeof;
				unallocated.next = current.next;
				if (current == this.free_list)
				{
					this.free_list = unallocated;
				}
				else
				{
					previous.next = unallocated;
				}
				current.size = aligned_bytes;
				current.next = null;
				return current + 1;
			default:
				previous = current;
				current = current.next;
		}
	}
	// Nothing fit: grow the heap, then retry — the fresh block is now in
	// the free list, so the recursive call will find it (fix #2).
	this.add_block(aligned_bytes)?;
	return this._alloc(aligned_bytes);
}
|
||||
|
||||
// Grow the heap: request one new block from the backing provider and
// hand it to the free list.
private fn void! SimpleHeapAllocator.add_block(SimpleHeapAllocator* this, usz aligned_bytes)
{
	assert(mem::aligned_offset(aligned_bytes, DEFAULT_MEM_ALIGNMENT) == aligned_bytes);
	// Ask for the payload plus room for the block header.
	char[] backing = this.alloc_fn(aligned_bytes + Header.sizeof)?;
	Header* block = (Header*)backing.ptr;
	// Use however much the provider actually handed back — it may exceed
	// the request.
	block.size = backing.len - Header.sizeof;
	block.next = null;
	// Insert via _free so the block lands (and coalesces) in the free list.
	this._free(block + 1);
}
|
||||
|
||||
|
||||
// Return a block to the free list, which is kept ordered by address so
// that neighbouring free blocks can be merged back together. `ptr` must
// be a pointer previously handed out by _alloc (or null).
private fn void SimpleHeapAllocator._free(SimpleHeapAllocator* this, void* ptr)
{
	// Empty ptr -> do nothing.
	if (!ptr) return;

	// Find the block header (it sits immediately before the payload).
	Header* block = (Header*)ptr - 1;

	// No free list? Then this block becomes the whole list.
	if (!this.free_list)
	{
		this.free_list = block;
		return;
	}

	// Find where in the address-ordered list it should be inserted.
	// Loop exits with either: current != null and block belongs just
	// before current, or current == null and block belongs after prev.
	Header* current = this.free_list;
	Header* prev = current;
	while (current)
	{
		if (block < current)
		{
			// Between prev and current
			if (block > prev) break;
			// Before current (still at the list head, prev == current)
			if (current == prev) break;
		}
		prev = current;
		current = prev.next;
	}
	if (current)
	{
		// Insert before the current block.
		// Are the blocks adjacent? (block's payload ends exactly at current)
		if (current == (Header*)((char*)(block + 1) + block.size))
		{
			// Merge: absorb current's payload and its header.
			block.size += current.size + Header.sizeof;
			block.next = current.next;
		}
		else
		{
			// Chain to current
			block.next = current;
		}
	}
	if (prev == current)
	{
		// Block precedes the old head: it becomes the new start of the list.
		this.free_list = block;
	}
	else
	{
		// Prev adjacent? (prev's payload ends exactly at block)
		if (block == (Header*)((char*)(prev + 1) + prev.size))
		{
			// Merge block (possibly already merged with current) into prev.
			prev.size += block.size + Header.sizeof;
			prev.next = block.next;
		}
		else
		{
			// Link prev to block
			prev.next = block;
		}
	}
}
|
||||
|
||||
// Block header stored immediately before each allocation's payload.
private union Header
{
	struct
	{
		Header* next;   // Next free block in address order; null when allocated / at list end.
		usz size;       // Usable payload size in bytes, NOT counting this header.
	}
	// NOTE(review): overlapped member — presumably pads/aligns the header
	// to a usz-sized slot; confirm intent against the C3 union layout rules.
	usz align;
}
|
||||
@@ -20,21 +20,24 @@ private fn void*! null_allocator_fn(Allocator* this, usz bytes, usz alignment, u
|
||||
}
|
||||
}
|
||||
|
||||
private struct AlignedBlock
|
||||
struct AlignedBlock
|
||||
{
|
||||
usz len;
|
||||
void* start;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @require bytes > 0
|
||||
* @require alignment > 0
|
||||
**/
|
||||
private fn void* _libc_aligned_alloc(usz bytes, usz alignment, usz offset) @inline
|
||||
macro void*! @aligned_alloc(#alloc_fn, usz bytes, usz alignment, usz offset)
|
||||
{
|
||||
usz header = mem::aligned_offset(AlignedBlock.sizeof + offset, alignment) - offset;
|
||||
void* data = libc::malloc(header + bytes);
|
||||
$if ($checks(#alloc_fn(bytes)?)):
|
||||
void* data = #alloc_fn(header + bytes)?;
|
||||
$else:
|
||||
void* data = #alloc_fn(header + bytes);
|
||||
$endif;
|
||||
void* mem = mem::aligned_pointer(data + header + offset, alignment) - offset;
|
||||
assert(mem > data);
|
||||
AlignedBlock* desc = (AlignedBlock*)mem - 1;
|
||||
@@ -46,10 +49,14 @@ private fn void* _libc_aligned_alloc(usz bytes, usz alignment, usz offset) @inli
|
||||
* @require bytes > 0
|
||||
* @require alignment > 0
|
||||
**/
|
||||
private fn void* _libc_aligned_calloc(usz bytes, usz alignment, usz offset) @inline
|
||||
macro void*! @aligned_calloc(#calloc_fn, usz bytes, usz alignment, usz offset)
|
||||
{
|
||||
usz header = mem::aligned_offset(AlignedBlock.sizeof + offset, alignment) - offset;
|
||||
void* data = libc::calloc(header + bytes, 1);
|
||||
$if ($checks(#calloc_fn(bytes)?)):
|
||||
void* data = #calloc_fn(header + bytes)?;
|
||||
$else:
|
||||
void* data = #calloc_fn(header + bytes);
|
||||
$endif;
|
||||
void* mem = mem::aligned_pointer(data + header + offset, alignment) - offset;
|
||||
AlignedBlock* desc = (AlignedBlock*)mem - 1;
|
||||
assert(mem > data);
|
||||
@@ -61,20 +68,28 @@ private fn void* _libc_aligned_calloc(usz bytes, usz alignment, usz offset) @inl
|
||||
* @require bytes > 0
|
||||
* @require alignment > 0
|
||||
**/
|
||||
private fn void* _libc_aligned_realloc(void* old_pointer, usz bytes, usz alignment, usz offset) @inline
|
||||
macro void*! @aligned_realloc(#calloc_fn, #free_fn, void* old_pointer, usz bytes, usz alignment, usz offset)
|
||||
{
|
||||
AlignedBlock* desc = (AlignedBlock*)old_pointer - 1;
|
||||
void* data_start = desc.start;
|
||||
void* new_data = _libc_aligned_calloc(bytes, alignment, offset);
|
||||
void* new_data = @aligned_calloc(#calloc_fn, bytes, alignment, offset)?;
|
||||
mem::copy(new_data, old_pointer, desc.len > bytes ? desc.len : bytes, DEFAULT_MEM_ALIGNMENT, DEFAULT_MEM_ALIGNMENT);
|
||||
libc::free(data_start);
|
||||
$if ($checks(#free_fn(data_start)?)):
|
||||
#free_fn(data_start)?;
|
||||
$else:
|
||||
#free_fn(data_start);
|
||||
$endif;
|
||||
return new_data;
|
||||
}
|
||||
|
||||
private fn void _libc_aligned_free(void* old_pointer) @inline
|
||||
macro void! @aligned_free(#free_fn, void* old_pointer)
|
||||
{
|
||||
AlignedBlock* desc = (AlignedBlock*)old_pointer - 1;
|
||||
libc::free(desc.start);
|
||||
$if ($checks(#free_fn(desc.start)?)):
|
||||
#free_fn(desc.start)?;
|
||||
$else:
|
||||
#free_fn(desc.start);
|
||||
$endif;
|
||||
}
|
||||
|
||||
fn void*! libc_allocator_fn(Allocator* unused, usz bytes, usz alignment, usz offset, void* old_pointer, AllocationKind kind) @inline
|
||||
@@ -86,17 +101,17 @@ fn void*! libc_allocator_fn(Allocator* unused, usz bytes, usz alignment, usz off
|
||||
switch (kind)
|
||||
{
|
||||
case ALIGNED_ALLOC:
|
||||
data = _libc_aligned_alloc(bytes, alignment, offset);
|
||||
data = @aligned_alloc(libc::malloc, bytes, alignment, offset)!!;
|
||||
case ALLOC:
|
||||
data = libc::malloc(bytes);
|
||||
case ALIGNED_CALLOC:
|
||||
data = _libc_aligned_calloc(bytes, alignment, offset);
|
||||
data = @aligned_calloc(fn void*(usz bytes) => libc::calloc(bytes, 1), bytes, alignment, offset)!!;
|
||||
case CALLOC:
|
||||
data = libc::calloc(bytes, 1);
|
||||
case ALIGNED_REALLOC:
|
||||
if (!bytes) nextcase ALIGNED_FREE;
|
||||
if (!old_pointer) nextcase ALIGNED_CALLOC;
|
||||
data = _libc_aligned_realloc(old_pointer, bytes, alignment, offset);
|
||||
data = @aligned_realloc(fn void*(usz bytes) => libc::calloc(bytes, 1), libc::free, old_pointer, bytes, alignment, offset)!!;
|
||||
case REALLOC:
|
||||
if (!bytes) nextcase FREE;
|
||||
if (!old_pointer) nextcase CALLOC;
|
||||
@@ -104,7 +119,7 @@ fn void*! libc_allocator_fn(Allocator* unused, usz bytes, usz alignment, usz off
|
||||
case RESET:
|
||||
return AllocationFailure.UNSUPPORTED_OPERATION!;
|
||||
case ALIGNED_FREE:
|
||||
_libc_aligned_free(old_pointer);
|
||||
@aligned_free(libc::free, old_pointer)!!;
|
||||
return null;
|
||||
case FREE:
|
||||
libc::free(old_pointer);
|
||||
|
||||
@@ -897,7 +897,7 @@ static inline bool sema_expr_analyse_hash_identifier(SemaContext *context, Expr
|
||||
if (!sema_analyse_expr_lvalue_fold_const(decl->var.hash_var.context, expr))
|
||||
{
|
||||
// Poison the decl so we don't evaluate twice.
|
||||
decl_poison(decl);
|
||||
if (!global_context.suppress_errors) decl_poison(decl);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@@ -6534,6 +6534,12 @@ static inline Decl *sema_find_cached_lambda(SemaContext *context, Type *func_typ
|
||||
static inline bool sema_expr_analyse_lambda(SemaContext *context, Type *func_type, Expr *expr)
|
||||
{
|
||||
Decl *decl = expr->lambda_expr;
|
||||
if (!decl_ok(decl)) return false;
|
||||
if (decl->resolve_status == RESOLVE_DONE)
|
||||
{
|
||||
expr->type = expr->type = type_get_ptr(decl->type);
|
||||
return true;
|
||||
}
|
||||
bool in_macro = context->current_macro;
|
||||
if (in_macro && decl->resolve_status != RESOLVE_DONE)
|
||||
{
|
||||
@@ -6616,6 +6622,7 @@ static inline bool sema_expr_analyse_lambda(SemaContext *context, Type *func_typ
|
||||
vec_add(unit->module->lambdas_to_evaluate, decl);
|
||||
expr->type = type_get_ptr(lambda_type);
|
||||
if (in_macro) vec_add(original->func_decl.generated_lambda, decl);
|
||||
decl->resolve_status = RESOLVE_DONE;
|
||||
return true;
|
||||
FAIL_NO_INFER:
|
||||
SEMA_ERROR(decl, "Inferred lambda expressions cannot be used unless the type can be determined.");
|
||||
|
||||
@@ -1 +1 @@
|
||||
#define COMPILER_VERSION "0.4.25"
|
||||
#define COMPILER_VERSION "0.4.26"
|
||||
Reference in New Issue
Block a user