mirror of
https://github.com/c3lang/c3c.git
synced 2026-02-27 12:01:16 +00:00
Added OnStack allocator. Added dirname, basename and extension to path functions.
This commit is contained in:
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2023 Christoffer Lerno. All rights reserved.
|
||||
// Use of this source code is governed by the MIT license
|
||||
// a copy of which can be found in the LICENSE_STDLIB file.
|
||||
module std::collections::map<Key, Value>;
|
||||
import std::math;
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2023 Christoffer Lerno. All rights reserved.
|
||||
// Use of this source code is governed by the MIT license
|
||||
// a copy of which can be found in the LICENSE_STDLIB file.
|
||||
module std::core::mem::allocator;
|
||||
|
||||
struct ArenaAllocator
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
|
||||
// Use of this source code is governed by the MIT license
|
||||
// a copy of which can be found in the LICENSE_STDLIB file.
|
||||
|
||||
module std::core::mem::allocator;
|
||||
|
||||
struct DynamicArenaAllocator
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
|
||||
// Use of this source code is governed by the MIT license
|
||||
// a copy of which can be found in the LICENSE_STDLIB file.
|
||||
|
||||
module std::core::mem::allocator;
|
||||
|
||||
typedef MemoryAllocFn = fn char[]!(usz);
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
|
||||
// Use of this source code is governed by the MIT license
|
||||
// a copy of which can be found in the LICENSE_STDLIB file.
|
||||
|
||||
module std::core::mem::allocator;
|
||||
import libc;
|
||||
|
||||
|
||||
235
lib/std/core/allocators/on_stack_allocator.c3
Normal file
235
lib/std/core/allocators/on_stack_allocator.c3
Normal file
@@ -0,0 +1,235 @@
|
||||
module std::core::mem::allocator;
|
||||
|
||||
// Allocator that serves requests from a fixed caller-provided buffer and
// transparently spills to a backing allocator once the buffer is exhausted.
struct OnStackAllocator
{
	inline Allocator allocator;
	// Receives allocations that do not fit in `data` (and their reallocs/frees).
	Allocator* backing_allocator;
	// The caller-provided stack buffer that allocations are bumped out of.
	char[] data;
	// Number of bytes of `data` consumed so far (bump pointer; reclaimed only
	// wholesale by free()).
	usz used;
	// Head of the linked list tracking allocations that spilled to the
	// backing allocator.
	OnStackAllocatorExtraChunk* chunk;
}
|
||||
|
||||
// Run @body with an `Allocator*` backed by a $size-byte buffer placed on the
// stack. Requests that exceed the buffer fall back to the heap; everything is
// released when the macro scope exits (via the deferred free).
macro void @stack_mem(usz $size; @body(Allocator* mem)) @builtin
{
	char[$size] buffer;
	OnStackAllocator allocator;
	allocator.init(&buffer, mem::heap());
	defer allocator.free();
	@body(&allocator);
}
|
||||
|
||||
// Like @stack_mem, but instead of passing the allocator to @body it installs
// the stack-backed allocator as the current scoped allocator (mem::@scoped)
// for the duration of @body. All memory is released when the scope exits.
macro void @stack_pool(usz $size; @body) @builtin
{
	char[$size] buffer;
	OnStackAllocator allocator;
	allocator.init(&buffer, mem::heap());
	defer allocator.free();
	mem::@scoped(&allocator)
	{
		@body();
	};
}
|
||||
|
||||
// Bookkeeping node for a single allocation that spilled to the backing
// allocator. Nodes form a singly linked list through `prev`, headed by
// OnStackAllocator.chunk.
struct OnStackAllocatorExtraChunk @local
{
	// True if the data was allocated with the aligned alloc family and must
	// therefore be released with free_aligned.
	bool is_aligned;
	OnStackAllocatorExtraChunk* prev;
	// The spilled allocation itself.
	void* data;
}
|
||||
|
||||
/**
 * Initialize a memory arena for use using the provided bytes.
 * `data` becomes the stack buffer allocations are bumped out of; `using` is
 * the backing allocator for requests that do not fit (defaults to the heap).
 *
 * @require this != null
 **/
fn void OnStackAllocator.init(OnStackAllocator* this, char[] data, Allocator* using = mem::heap())
{
	// Wire up the Allocator interface callback.
	this.function = &on_stack_allocator_function;
	this.data = data;
	this.backing_allocator = using;
	this.used = 0;
}
|
||||
|
||||
/**
 * Release every allocation that spilled to the backing allocator and reset
 * the stack buffer. The buffer itself is caller-owned and untouched.
 *
 * @require this != null
 **/
fn void OnStackAllocator.free(OnStackAllocator* this)
{
	OnStackAllocatorExtraChunk* chunk = this.chunk;
	while (chunk)
	{
		// Release the spilled data with the matching free variant...
		if (chunk.is_aligned)
		{
			this.backing_allocator.free_aligned(chunk.data)!!;
		}
		else
		{
			this.backing_allocator.free(chunk.data)!!;
		}
		// ...then release the bookkeeping node itself.
		void* old = chunk;
		chunk = chunk.prev;
		this.backing_allocator.free(old)!!;
	}
	this.chunk = null;
	// Reset the bump pointer so the buffer can be reused.
	this.used = 0;
}
|
||||
|
||||
// Header stored immediately before each allocation served from the stack
// buffer; records the allocation size so realloc knows how much to copy.
struct OnStackAllocatorHeader
{
	usz size;
	// Flexible array member: the user allocation starts here.
	char[*] data;
}
|
||||
|
||||
/**
 * Allocator interface callback for OnStackAllocator: dispatches
 * alloc/calloc/realloc/free (and their aligned variants) to the
 * stack-buffer helpers below.
 *
 * @require !alignment || math::is_power_of_2(alignment)
 * @require data `unexpectedly missing the allocator`
 */
fn void*! on_stack_allocator_function(Allocator* data, usz size, usz alignment, usz offset, void* old_pointer, AllocationKind kind) @private
{
	OnStackAllocator* allocator = (OnStackAllocator*)data;
	bool clear = false;
	switch (kind)
	{
		case CALLOC:
		case ALIGNED_CALLOC:
			clear = true;
			// Fall through into the plain alloc path with `clear` set.
			nextcase;
		case ALLOC:
		case ALIGNED_ALLOC:
			assert(!old_pointer, "Unexpected old pointer for alloc.");
			if (!size) return null;
			return on_stack_allocator_alloc(allocator, size, alignment, offset, clear, kind == AllocationKind.ALIGNED_ALLOC || kind == AllocationKind.ALIGNED_CALLOC);
		case ALIGNED_REALLOC:
		case REALLOC:
			// realloc(p, 0) behaves as free; realloc(null, n) behaves as alloc.
			if (!size) nextcase FREE;
			if (!old_pointer) nextcase ALLOC;
			return on_stack_allocator_realloc(allocator, old_pointer, size, alignment, offset, kind == AllocationKind.ALIGNED_REALLOC);
		case ALIGNED_FREE:
		case FREE:
			if (!old_pointer) return null;
			// Memory inside the stack buffer is only reclaimed wholesale by
			// OnStackAllocator.free(); individual frees are no-ops for it.
			if (allocation_in_stack_mem(allocator, old_pointer)) return null;
			// Spilled allocation: unlink its tracking chunk, then free it.
			on_stack_allocator_remove_chunk(allocator, old_pointer);
			if (kind == AllocationKind.ALIGNED_FREE)
			{
				allocator.backing_allocator.free_aligned(old_pointer)?;
			}
			else
			{
				allocator.backing_allocator.free(old_pointer)?;
			}
			return null;
		case MARK:
		case RESET:
			return AllocationFailure.UNSUPPORTED_OPERATION!;
	}
	unreachable();
}
|
||||
|
||||
// True iff `ptr` points inside the allocator's stack buffer, i.e. lies
// between the first and the last byte of `a.data`.
fn bool allocation_in_stack_mem(OnStackAllocator* a, void* ptr) @local
{
	void* first_byte = a.data.ptr;
	void* last_byte = &a.data[^1];
	return ptr >= first_byte && ptr <= last_byte;
}
|
||||
|
||||
// Unlink and free the bookkeeping chunk whose data pointer equals `ptr`.
// Walks the singly linked list keeping the address of the link that points
// at the current node, so removal is a single pointer rewrite.
fn void on_stack_allocator_remove_chunk(OnStackAllocator* a, void* ptr) @local
{
	OnStackAllocatorExtraChunk** link = &a.chunk;
	for (OnStackAllocatorExtraChunk* current = *link; current; current = *link)
	{
		if (current.data == ptr)
		{
			*link = current.prev;
			a.backing_allocator.free(current)!!;
			return;
		}
		link = &current.prev;
	}
	// The pointer was classified as a spilled allocation, so a chunk must exist.
	unreachable("Missing chunk");
}
|
||||
|
||||
// Linear scan of the spilled-allocation list; returns the chunk tracking
// `ptr`, or null if `ptr` is not a spilled allocation.
fn OnStackAllocatorExtraChunk* on_stack_allocator_find_chunk(OnStackAllocator* a, void* ptr) @local
{
	for (OnStackAllocatorExtraChunk* node = a.chunk; node != null; node = node.prev)
	{
		if (node.data == ptr) return node;
	}
	return null;
}
|
||||
|
||||
/**
 * Reallocate a block previously handed out by this allocator.
 *
 * Blocks that spilled to the backing allocator are reallocated there and the
 * tracking chunk updated to the (possibly moved) pointer. Blocks inside the
 * stack buffer are copied into a fresh allocation; the old stack bytes are
 * simply abandoned (bump allocators cannot reclaim them individually).
 *
 * @require size > 0
 * @require alignment <= mem::MAX_MEMORY_ALIGNMENT `alignment too big`
 * @require offset <= mem::MAX_MEMORY_ALIGNMENT `offset too big`
 * @require offset <= size && offset >= 0
 * @require mem::aligned_offset(offset, ArenaAllocatorHeader.alignof) == offset
 * @require a != null
 **/
fn void*! on_stack_allocator_realloc(OnStackAllocator* a, void* old_pointer, usz size, usz alignment, usz offset, bool aligned) @local @inline
{
	if (!allocation_in_stack_mem(a, old_pointer))
	{
		OnStackAllocatorExtraChunk* chunk = on_stack_allocator_find_chunk(a, old_pointer);
		assert(chunk, "Tried to realloc pointer not belonging to the allocator");
		if (aligned)
		{
			return chunk.data = a.backing_allocator.realloc_aligned(old_pointer, size, alignment, offset)?;
		}
		return chunk.data = a.backing_allocator.realloc(old_pointer, size)?;
	}

	// Stack-buffer block: its size is recorded in the header just before it.
	OnStackAllocatorHeader* header = old_pointer - OnStackAllocatorHeader.sizeof;
	usz old_size = header.size;
	void* mem = on_stack_allocator_alloc(a, size, alignment, offset, true, aligned)?;
	// BUGFIX: only copy what fits in the new block. Copying `old_size`
	// unconditionally overflowed the new allocation when shrinking.
	usz copy_size = old_size < size ? old_size : size;
	mem::copy(mem, old_pointer, copy_size, mem::DEFAULT_MEM_ALIGNMENT, mem::DEFAULT_MEM_ALIGNMENT);
	return mem;
}
|
||||
|
||||
import std::io;
|
||||
/**
 * Allocate from the stack buffer, spilling to the backing allocator when the
 * buffer cannot hold the request.
 *
 * @require size > 0
 * @require alignment <= mem::MAX_MEMORY_ALIGNMENT `alignment too big`
 * @require offset <= mem::MAX_MEMORY_ALIGNMENT `offset too big`
 * @require offset <= size && offset >= 0
 * @require mem::aligned_offset(offset, ArenaAllocatorHeader.alignof) == offset
 * @require a != null
 **/
fn void*! on_stack_allocator_alloc(OnStackAllocator* a, usz size, usz alignment, usz offset, bool clear, bool aligned) @local @inline
{
	alignment = alignment_for_allocation(alignment);
	usz total_len = a.data.len;
	void* start_mem = a.data.ptr;
	// Reserve room for the header before the block, then align the address
	// `offset` bytes into the allocation (that interior address is what the
	// caller requires to be aligned).
	void* unaligned_pointer_to_offset = start_mem + a.used + OnStackAllocatorHeader.sizeof + offset;
	void* aligned_pointer_to_offset = mem::aligned_pointer(unaligned_pointer_to_offset, alignment);
	// New bump-pointer position if this allocation is placed in the buffer.
	usz end = (usz)(aligned_pointer_to_offset - a.data.ptr) + size - offset;

	Allocator* backing_allocator = a.backing_allocator;

	if (end > total_len)
	{
		// Does not fit: spill to the backing allocator and track the
		// allocation in an extra chunk so free()/realloc can find it.
		OnStackAllocatorExtraChunk* chunk = backing_allocator.alloc(OnStackAllocatorExtraChunk.sizeof)?;
		// On a later failure the chunk is rolled back; on success it is
		// linked in as the new list head.
		defer catch backing_allocator.free(chunk)!!;
		defer try a.chunk = chunk;
		*chunk = { .prev = a.chunk, .is_aligned = aligned };
		void* data @noinit;
		// Pick the backing call matching the clear/aligned combination.
		switch
		{
			case !aligned && !clear:
				data = backing_allocator.alloc(size)?;
			case aligned && !clear:
				data = backing_allocator.alloc_aligned(size, alignment, offset)?;
			case !aligned && clear:
				data = backing_allocator.calloc(size)?;
			case aligned && clear:
				data = backing_allocator.calloc_aligned(size, alignment, offset)?;
		}
		return chunk.data = data;
	}
	// Fits in the buffer: advance the bump pointer and record the size in
	// the header immediately preceding the returned block.
	a.used = end;
	void *mem = aligned_pointer_to_offset - offset;
	OnStackAllocatorHeader* header = mem - OnStackAllocatorHeader.sizeof;
	header.size = size;
	return mem;
}
|
||||
@@ -24,12 +24,12 @@ fault NumberConversion
|
||||
|
||||
macro String printf(String fmt, ..., Allocator* using = mem::heap())
|
||||
{
|
||||
@allocating_pool(using; bool is_temp)
|
||||
@stack_mem(256; Allocator* mem)
|
||||
{
|
||||
DString str;
|
||||
str.tinit();
|
||||
str.init(.using = mem);
|
||||
str.printf(fmt, $vasplat());
|
||||
return using == is_temp ? str.str() : str.copy_str(using);
|
||||
return str.copy_str(using);
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -40,13 +40,13 @@ fn bool native_is_dir(String path)
|
||||
|
||||
fn Path! native_temp_directory(Allocator* using = mem::heap())
|
||||
{
|
||||
@allocating_pool(using; bool is_temp)
|
||||
@stack_mem(256; Allocator* mem)
|
||||
{
|
||||
Win32_DWORD len = files::win32_GetTempPathW(0, null);
|
||||
if (!len) return IoError.GENERAL_ERROR!;
|
||||
Char16[] buff = tmalloc(Char16, len + 1);
|
||||
Char16[] buff = malloc(Char16, len + 1, .using = mem);
|
||||
if (!files::win32_GetTempPathW(len, buff)) return IoError.GENERAL_ERROR!;
|
||||
return path::new(string::from_utf16(buff[:len]), using);
|
||||
return path::new(string::from_utf16(buff[:len], .using = mem), using);
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -29,9 +29,9 @@ enum PathEnv
|
||||
|
||||
fn Path! getcwd(Allocator* using = mem::heap())
|
||||
{
|
||||
@allocating_pool(using; bool is_temp)
|
||||
@stack_mem(256; Allocator* mem)
|
||||
{
|
||||
return new(os::getcwd(using));
|
||||
return new(os::getcwd(mem), using);
|
||||
};
|
||||
}
|
||||
|
||||
@@ -89,19 +89,56 @@ fn Path! Path.append(Path path, String filename, Allocator* using = mem::heap())
|
||||
if (!path.path_string.len) return new(filename, using, path.env)?;
|
||||
assert(!is_separator(path.path_string[^1], path.env));
|
||||
|
||||
// Handle temp nested temp allocations.
|
||||
@allocating_pool(using; bool is_temp)
|
||||
@stack_mem(256; Allocator* mem)
|
||||
{
|
||||
DString dstr = dstring::tnew_with_capacity(path.path_string.len + 1 + filename.len);
|
||||
DString dstr = dstring::new_with_capacity(path.path_string.len + 1 + filename.len, .using = mem);
|
||||
dstr.append(path.path_string);
|
||||
dstr.append(PREFERRED_SEPARATOR);
|
||||
dstr.append(filename);
|
||||
return { normalize(is_temp ? dstr.str() : dstr.copy_str(using), path.env), path.env };
|
||||
return { normalize(dstr.copy_str(using), path.env), path.env };
|
||||
};
|
||||
}
|
||||
|
||||
fn Path! Path.tappend(Path path, String filename) => path.append(filename, mem::temp());
|
||||
|
||||
// Index where the basename starts: one past the last separator, or — when no
// separator exists — the end of the volume name on Win32 and 0 on POSIX.
fn usz Path.start_of_base_name(Path path) @local
{
	String path_str = path.path_string;
	if (!path_str.len) return 0;
	if (path.env == PathEnv.WIN32)
	{
		// NOTE(review): only `\` is searched here — presumably paths are
		// normalized to backslash separators on Win32; confirm upstream.
		return path_str.rindex_of(`\`) + 1 ?? volume_name_len(path_str, path.env)!!;
	}
	// `??` supplies the fallback when rindex_of finds no separator.
	return path_str.rindex_of("/") + 1 ?? 0;
}
|
||||
|
||||
// The final component of the path: everything after the last separator.
// Returns "" when the path ends at the separator/volume boundary.
fn String Path.basename(Path path)
{
	String full = path.path_string;
	usz start = path.start_of_base_name();
	return start == full.len ? "" : full[start..];
}
|
||||
|
||||
// The directory part of the path: everything before the basename, without
// the trailing separator — except directly after the volume/root, where the
// separator is kept (e.g. "/" or "C:\").
fn String Path.dirname(Path path)
{
	usz basename_start = path.start_of_base_name();
	String path_str = path.path_string;
	if (basename_start == 0) return "";
	usz start = volume_name_len(path_str, path.env)!!;
	// Root directory: keep the separator so the result stays a valid path.
	if (basename_start <= start + 1) return path_str[:basename_start];
	return path_str[:basename_start - 1];
}
|
||||
|
||||
/**
 * The extension of the path's basename: the text after the last '.'.
 * A trailing dot ("foo.") yields "".
 *
 * Fails with SearchResult.MISSING when the basename contains no '.' or when
 * the only '.' is the leading one (".foo" marks a hidden file, not an
 * extension).
 **/
fn String! Path.extension(Path path)
{
	String basename = path.basename();
	usz index = basename.rindex_of(".")?;
	// Plain ".foo" does not have an extension — the leading dot is part of the name.
	if (index == 0) return SearchResult.MISSING!;
	// BUGFIX: rindex_of returns at most len - 1, so the old comparison
	// `index == basename.len` was dead code; compare against the last index
	// to make the trailing-dot case explicit.
	if (index == basename.len - 1) return "";
	return basename[index + 1..];
}
|
||||
|
||||
fn String Path.volume_name(Path path)
|
||||
{
|
||||
@@ -9,6 +9,7 @@ fault NetError
|
||||
INVALID_IP_STRING,
|
||||
}
|
||||
|
||||
|
||||
fn uint! ipv4toint(String s)
|
||||
{
|
||||
uint out;
|
||||
|
||||
@@ -3,7 +3,6 @@ import libc;
|
||||
|
||||
$if (env::OS_TYPE == OsType.LINUX):
|
||||
|
||||
|
||||
struct AddrInfo
|
||||
{
|
||||
int ai_flags;
|
||||
|
||||
Reference in New Issue
Block a user