- Temp allocator now supports more than 2 in-flight stacks.

- Printing stacktrace uses its own temp allocator.
- `@pool` no longer takes an argument.
- `Allocator` interface removes `mark` and `reset`.
- `DynamicArenaAllocator` has a changed `init` function.
- Added `BackedArenaAllocator` which is allocated to a fixed size, then allocates on the backing allocator and supports mark/reset.
This commit is contained in:
Christoffer Lerno
2025-03-18 15:16:22 +01:00
parent 82cc49b388
commit 72608ce01d
27 changed files with 519 additions and 334 deletions

View File

@@ -95,7 +95,7 @@ fn usz? write(String filename, char[] input, QOIDesc* desc) => @pool()
@param channels : `The channels to be used`
@return? FILE_OPEN_FAILED, INVALID_DATA, TOO_MANY_PIXELS
*>
fn char[]? read(Allocator allocator, String filename, QOIDesc* desc, QOIChannels channels = AUTO) => @pool(allocator)
fn char[]? read(Allocator allocator, String filename, QOIDesc* desc, QOIChannels channels = AUTO) => @pool()
{
// read file
char[] data = file::load_temp(filename) ?? FILE_OPEN_FAILED?!;

View File

@@ -50,8 +50,8 @@ fn void ArenaAllocator.release(&self, void* ptr, bool) @dynamic
}
}
fn usz ArenaAllocator.mark(&self) @dynamic => self.used;
fn void ArenaAllocator.reset(&self, usz mark) @dynamic => self.used = mark;
fn usz ArenaAllocator.mark(&self) => self.used;
fn void ArenaAllocator.reset(&self, usz mark) => self.used = mark;
<*
@require !alignment || math::is_power_of_2(alignment)

View File

@@ -0,0 +1,210 @@
module std::core::mem::allocator;
import std::io, std::math;
// Header stored immediately before every in-arena allocation handed out by
// `acquire`. For allocations that instead overflowed to the backing
// allocator, the same slot is occupied by ExtraPage.ident (set to ~(usz)0),
// which `resize` uses as a sentinel to tell the two cases apart.
struct AllocChunk @local
{
usz size;     // Requested payload size; the user pointer is &data[0].
char[*] data; // Flexible payload directly after the header.
}
// A fixed-size arena allocated as a single block from `backing_allocator`.
// Small requests are bump-allocated out of `data`; requests that do not fit
// are forwarded to the backing allocator and tracked as ExtraPage entries so
// that mark/reset can unwind them.
struct BackedArenaAllocator (Allocator)
{
Allocator backing_allocator; // Source of this struct's own memory and of overflow pages.
ExtraPage* last_page;        // Most recent overflow page (singly linked list), or null.
usz used;                    // Bytes of `data` currently in use; also the value mark() returns.
usz capacity;                // Total size of `data`.
char[*] data;                // In-place arena storage.
}
// Top bit of ExtraPage.size: set when the page was obtained via an aligned
// allocation and must therefore be released as aligned.
const usz PAGE_IS_ALIGNED @local = (usz)isz.max + 1u;
// Header for an allocation that overflowed the arena and was taken from the
// backing allocator instead. The layout ends with (usz ident; char[*] data)
// so that `ident` sits exactly where AllocChunk.size would be read.
struct ExtraPage @local
{
ExtraPage* prev_page; // Next-older page in the list rooted at last_page.
void* start;          // Actual pointer returned by the backing allocator.
usz mark;             // Value of ++used at creation; reset(m) frees pages with mark > m.
usz size;             // Payload size, possibly with PAGE_IS_ALIGNED or'ed in.
usz ident;            // Always ~(usz)0; lets resize() recognize a page allocation.
char[*] data;         // Start of the user-visible allocation.
}
// Payload size with the aligned flag masked off.
macro usz ExtraPage.pagesize(&self) => self.size & ~PAGE_IS_ALIGNED;
// True if the page must be released as an aligned allocation.
// NOTE(review): reads as (size & FLAG) == FLAG only because C3 gives `&`
// higher precedence than `==` (unlike C) — confirm against the language spec.
macro bool ExtraPage.is_aligned(&self) => self.size & PAGE_IS_ALIGNED == PAGE_IS_ALIGNED;
<*
 Create a BackedArenaAllocator with `size` bytes of in-place arena storage,
 carved out of a single allocation from `allocator`.
 @require size >= 16
*>
fn BackedArenaAllocator*? new_backed_allocator(usz size, Allocator allocator)
{
    // One backing allocation holds both the struct and its trailing arena.
    BackedArenaAllocator* arena = allocator::alloc_with_padding(allocator, BackedArenaAllocator, size)!;
    arena.backing_allocator = allocator;
    arena.capacity = size;
    arena.used = 0;
    arena.last_page = null;
    return arena;
}
<*
 Free all overflow pages and then the allocator's own backing block.
 The allocator must not be used after this call.
*>
fn void BackedArenaAllocator.destroy(&self)
{
// reset(0) frees every page whose mark exceeds 0; marks are assigned as
// ++used at creation and are therefore always >= 1, so this clears the list.
self.reset(0);
// Defensive: reset(0) should already have left last_page null —
// TODO confirm this branch is actually reachable.
if (self.last_page) (void)self._free_page(self.last_page);
allocator::free(self.backing_allocator, self);
}
// Current bump position; pass this value to reset() to roll back to here.
fn usz BackedArenaAllocator.mark(&self) => self.used;
<*
 Release a single allocation. Only the most recent in-arena allocation is
 actually reclaimed; anything else is a no-op, with the memory recovered
 later by reset/destroy instead.
*>
fn void BackedArenaAllocator.release(&self, void* old_pointer, bool) @dynamic
{
// Read the size header stored just before the pointer. DEFAULT_SIZE_PREFIX
// is usz.sizeof, so for arena allocations this is AllocChunk.size; for
// overflow pages it reads ExtraPage.ident (~(usz)0), which in practice can
// never satisfy the tail check below — pages are never reclaimed here.
usz old_size = *(usz*)(old_pointer - DEFAULT_SIZE_PREFIX);
// Only the allocation sitting exactly at the arena tail can be popped.
if (old_pointer + old_size == &self.data[self.used])
{
self.used -= old_size;
asan::poison_memory_region(&self.data[self.used], old_size);
}
}
<*
 Roll the arena back to a previous mark. Overflow pages created after the
 mark are returned to the backing allocator; in safe mode / under ASan the
 reclaimed arena range is pattern-filled and poisoned.
 @param mark : `A value previously returned by mark(), or 0 for a full reset`
*>
fn void BackedArenaAllocator.reset(&self, usz mark)
{
// Free every page created after `mark`. A page's mark is the ++used value
// at its creation, so it belongs to the span being rolled back exactly
// when its mark exceeds the target.
ExtraPage *last_page = self.last_page;
while (last_page && last_page.mark > mark)
{
self.used = last_page.mark;
ExtraPage *to_free = last_page;
last_page = last_page.prev_page;
self._free_page(to_free)!!;
}
self.last_page = last_page;
$if env::COMPILER_SAFE_MODE || env::ADDRESS_SANITIZER:
// NOTE(review): scrubbing is only done when no overflow page survives the
// rollback — presumably because `used` was rewound via page marks above;
// confirm the intent.
if (!last_page)
{
usz cleaned = self.used - mark;
if (cleaned > 0)
{
// Fill with 0xAA so use-after-reset is easy to spot in a debugger.
$if env::COMPILER_SAFE_MODE && !env::ADDRESS_SANITIZER:
self.data[mark : cleaned] = 0xAA;
$endif
asan::poison_memory_region(&self.data[mark], cleaned);
}
}
$endif
self.used = mark;
}
<*
 Return an overflow page to the backing allocator, using the aligned
 release path when the page was allocated aligned.
*>
fn void? BackedArenaAllocator._free_page(&self, ExtraPage* page) @inline @local
{
    return self.backing_allocator.release(page.start, page.is_aligned());
}
<*
 Move an overflow page's contents into a fresh allocation of `size` bytes,
 unlink the page from the page list, and release it to the backing allocator.
 Returns the new user pointer.
*>
fn void*? BackedArenaAllocator._realloc_page(&self, ExtraPage* page, usz size, usz alignment) @inline @local
{
    // The actual start pointer of the backing allocation:
    void* real_pointer = page.start;
    // Walk backwards to find the link that points to this page,
    // then remove the page from the singly linked list.
    ExtraPage **pointer_to_prev = &self.last_page;
    while (*pointer_to_prev != page)
    {
        pointer_to_prev = &((*pointer_to_prev).prev_page);
    }
    *pointer_to_prev = page.prev_page;
    usz page_size = page.pagesize();
    void* data = self.acquire(size, NO_ZERO, alignment)!;
    // BUG FIX: copy only what fits in the new allocation. The previous code
    // always copied `page_size` bytes, overrunning `data` on a shrinking
    // realloc. Growth still leaves the tail uninitialized (NO_ZERO).
    usz bytes_to_copy = page_size < size ? page_size : size;
    mem::copy(data, &page.data[0], bytes_to_copy, mem::DEFAULT_MEM_ALIGNMENT, mem::DEFAULT_MEM_ALIGNMENT);
    self.backing_allocator.release(real_pointer, page.is_aligned());
    return data;
}
<*
 Reallocate `pointer` to `size` bytes. Page allocations (recognized by the
 ~(usz)0 ident sentinel in the size slot) are moved via _realloc_page;
 in-arena allocations are copied into a fresh acquire.
*>
fn void*? BackedArenaAllocator.resize(&self, void* pointer, usz size, usz alignment) @dynamic
{
    AllocChunk *chunk = pointer - AllocChunk.sizeof;
    if (chunk.size == (usz)-1)
    {
        assert(self.last_page, "Realloc of unrelated pointer");
        // First grab the page
        ExtraPage *page = pointer - ExtraPage.sizeof;
        return self._realloc_page(page, size, alignment);
    }
    AllocChunk* data = self.acquire(size, NO_ZERO, alignment)!;
    // BUG FIX: copy only min(old, new) bytes. The previous code always copied
    // `chunk.size` bytes, writing past the new allocation when shrinking.
    usz bytes_to_copy = chunk.size < size ? chunk.size : size;
    mem::copy(data, pointer, bytes_to_copy, mem::DEFAULT_MEM_ALIGNMENT, mem::DEFAULT_MEM_ALIGNMENT);
    return data;
}
<*
 Allocate `size` bytes, preferring the in-place arena and falling back to
 the backing allocator (recorded as an ExtraPage) when the arena is full.
@require size > 0
@require !alignment || math::is_power_of_2(alignment)
@require alignment <= mem::MAX_MEMORY_ALIGNMENT : `alignment too big`
*>
fn void*? BackedArenaAllocator.acquire(&self, usz size, AllocInitType init_type, usz alignment) @dynamic
{
alignment = alignment_for_allocation(alignment);
void* start_mem = &self.data;
// Lay out [padding][AllocChunk header][padding][payload] at the arena tail:
// first align the header, then (only if stricter) align the user pointer.
void* starting_ptr = start_mem + self.used;
void* aligned_header_start = mem::aligned_pointer(starting_ptr, AllocChunk.alignof);
void* mem = aligned_header_start + AllocChunk.sizeof;
if (alignment > AllocChunk.alignof)
{
mem = mem::aligned_pointer(mem, alignment);
}
usz new_usage = (usz)(mem - start_mem) + size;
// Arena allocation, simple!
if (new_usage <= self.capacity)
{
// Unpoison the whole consumed span (padding + header + payload).
asan::unpoison_memory_region(starting_ptr, new_usage - self.used);
AllocChunk* chunk_start = mem - AllocChunk.sizeof;
chunk_start.size = size;
self.used = new_usage;
if (init_type == ZERO) mem::clear(mem, size, mem::DEFAULT_MEM_ALIGNMENT);
return mem;
}
// Fallback to backing allocator
ExtraPage* page;
// We have something we need to align.
if (alignment > mem::DEFAULT_MEM_ALIGNMENT)
{
// This is actually simpler, since it will create the offset for us.
usz total_alloc_size = mem::aligned_offset(ExtraPage.sizeof + size, alignment);
if (init_type == ZERO)
{
mem = allocator::calloc_aligned(self.backing_allocator, total_alloc_size, alignment)!;
}
else
{
mem = allocator::malloc_aligned(self.backing_allocator, total_alloc_size, alignment)!;
}
void* start = mem;
// Place the page header immediately before the aligned user pointer.
mem += mem::aligned_offset(ExtraPage.sizeof, alignment);
page = (ExtraPage*)mem - 1;
page.start = start;
// Flag the page so _free_page releases it via the aligned path.
page.size = size | PAGE_IS_ALIGNED;
}
else
{
// Here we might need to pad
usz padded_header_size = mem::aligned_offset(ExtraPage.sizeof, mem::DEFAULT_MEM_ALIGNMENT);
usz total_alloc_size = padded_header_size + size;
void* alloc = self.backing_allocator.acquire(total_alloc_size, init_type, 0)!;
// Find the page.
page = alloc + padded_header_size - ExtraPage.sizeof;
assert(mem::ptr_is_aligned(page, BackedArenaAllocator.alignof));
assert(mem::ptr_is_aligned(&page.data[0], mem::DEFAULT_MEM_ALIGNMENT));
page.start = alloc;
page.size = size;
}
// Mark it as a page
page.ident = ~(usz)0;
// Store when it was created
// (++used also makes successive page marks distinct — TODO confirm intent).
page.mark = ++self.used;
// Hook up the page.
page.prev_page = self.last_page;
self.last_page = page;
return &page.data[0];
}

View File

@@ -16,7 +16,7 @@ struct DynamicArenaAllocator (Allocator)
@param [&inout] allocator
@require page_size >= 128
*>
fn void DynamicArenaAllocator.init(&self, usz page_size, Allocator allocator)
fn void DynamicArenaAllocator.init(&self, Allocator allocator, usz page_size)
{
self.page = null;
self.unused_page = null;
@@ -110,9 +110,8 @@ fn void*? DynamicArenaAllocator.resize(&self, void* old_pointer, usz size, usz a
return new_mem;
}
fn void DynamicArenaAllocator.reset(&self, usz mark = 0) @dynamic
fn void DynamicArenaAllocator.reset(&self)
{
assert(mark == 0, "Unexpectedly reset dynamic arena allocator with mark %d", mark);
DynamicArenaPage* page = self.page;
DynamicArenaPage** unused_page_ptr = &self.unused_page;
while (page)

View File

@@ -11,8 +11,11 @@ struct TempAllocator (Allocator)
{
Allocator backing_allocator;
TempAllocatorPage* last_page;
TempAllocator* derived;
bool allocated;
usz used;
usz capacity;
usz original_capacity;
char[*] data;
}
@@ -23,7 +26,6 @@ struct TempAllocatorPage
{
TempAllocatorPage* prev_page;
void* start;
usz mark;
usz size;
usz ident;
char[*] data;
@@ -34,25 +36,92 @@ macro bool TempAllocatorPage.is_aligned(&self) => self.size & PAGE_IS_ALIGNED ==
<*
@require size >= 16
@require allocator.type != TempAllocator.typeid : "You may not create a temp allocator with a TempAllocator as the backing allocator."
*>
fn TempAllocator*? new_temp_allocator(usz size, Allocator allocator)
fn TempAllocator*? new_temp_allocator(Allocator allocator, usz size)
{
TempAllocator* temp = allocator::alloc_with_padding(allocator, TempAllocator, size)!;
temp.last_page = null;
temp.backing_allocator = allocator;
temp.used = 0;
temp.capacity = size;
temp.allocated = true;
temp.derived = null;
temp.original_capacity = temp.capacity = size;
return temp;
}
fn void TempAllocator.destroy(&self)
<*
@require !self.derived
@require min_size > TempAllocator.sizeof + 64 : "Min size must meaningfully hold the data + some bytes"
@require mult > 0 : "The multiple can never be zero"
*>
fn TempAllocator*? TempAllocator.derive_allocator(&self, usz min_size, usz buffer, usz mult)
{
self.reset(0);
if (self.last_page) (void)self._free_page(self.last_page);
allocator::free(self.backing_allocator, self);
usz remaining = self.capacity - self.used;
void* mem @noinit;
usz size @noinit;
if (min_size + buffer > remaining)
{
return self.derived = new_temp_allocator(self.backing_allocator, min_size * mult)!;
}
usz start = mem::aligned_offset(self.used + buffer, mem::DEFAULT_MEM_ALIGNMENT);
void* ptr = &self.data[start];
TempAllocator* temp = (TempAllocator*)ptr;
temp.last_page = null;
temp.backing_allocator = self.backing_allocator;
temp.used = 0;
temp.allocated = false;
temp.derived = null;
temp.original_capacity = temp.capacity = self.capacity - start - TempAllocator.sizeof;
self.capacity = start;
self.derived = temp;
return temp;
}
fn usz TempAllocator.mark(&self) @dynamic => self.used;
fn void TempAllocator.reset(&self)
{
TempAllocator* child = self.derived;
if (!child) return;
while (child)
{
TempAllocator* old = child;
old.destroy();
child = old.derived;
}
self.capacity = self.original_capacity;
self.derived = null;
}
<*
@require self.allocated : "Only a top level allocator should be freed."
*>
fn void TempAllocator.free(&self)
{
self.reset();
self.destroy();
}
fn void TempAllocator.destroy(&self) @local
{
TempAllocatorPage *last_page = self.last_page;
while (last_page)
{
TempAllocatorPage *to_free = last_page;
last_page = last_page.prev_page;
self._free_page(to_free)!!;
}
if (self.allocated)
{
allocator::free(self.backing_allocator, self);
return;
}
$if env::COMPILER_SAFE_MODE || env::ADDRESS_SANITIZER:
$if env::COMPILER_SAFE_MODE && !env::ADDRESS_SANITIZER:
self.data[0 : self.used] = 0xAA;
$else
asan::poison_memory_region(&self.data[0], self.used);
$endif
$endif
}
fn void TempAllocator.release(&self, void* old_pointer, bool) @dynamic
{
@@ -63,32 +132,7 @@ fn void TempAllocator.release(&self, void* old_pointer, bool) @dynamic
asan::poison_memory_region(&self.data[self.used], old_size);
}
}
fn void TempAllocator.reset(&self, usz mark) @dynamic
{
TempAllocatorPage *last_page = self.last_page;
while (last_page && last_page.mark > mark)
{
self.used = last_page.mark;
TempAllocatorPage *to_free = last_page;
last_page = last_page.prev_page;
self._free_page(to_free)!!;
}
self.last_page = last_page;
$if env::COMPILER_SAFE_MODE || env::ADDRESS_SANITIZER:
if (!last_page)
{
usz cleaned = self.used - mark;
if (cleaned > 0)
{
$if env::COMPILER_SAFE_MODE && !env::ADDRESS_SANITIZER:
self.data[mark : cleaned] = 0xAA;
$endif
asan::poison_memory_region(&self.data[mark], cleaned);
}
}
$endif
self.used = mark;
}
fn void? TempAllocator._free_page(&self, TempAllocatorPage* page) @inline @local
{
@@ -202,29 +246,9 @@ fn void*? TempAllocator.acquire(&self, usz size, AllocInitType init_type, usz al
// Mark it as a page
page.ident = ~(usz)0;
// Store when it was created
page.mark = ++self.used;
// Hook up the page.
page.prev_page = self.last_page;
self.last_page = page;
return &page.data[0];
}
fn void? TempAllocator.print_pages(&self, File* f)
{
TempAllocatorPage *last_page = self.last_page;
if (!last_page)
{
io::fprintf(f, "No pages.\n")!;
return;
}
io::fprintf(f, "---Pages----\n")!;
uint index = 0;
while (last_page)
{
bool is_not_aligned = !(last_page.size & (1u64 << 63));
io::fprintf(f, "%d. Alloc: %d %d at %p%s\n", ++index,
last_page.size & ~(1u64 << 63), last_page.mark, &last_page.data[0], is_not_aligned ? "" : " [aligned]")!;
last_page = last_page.prev_page;
}
}

View File

@@ -4,6 +4,7 @@
module std::core::builtin;
import libc, std::hash, std::io, std::os::backtrace;
<*
EMPTY_MACRO_SLOT is a value used for implementing optional arguments for macros in an efficient
way. It relies on the fact that distinct types are not implicitly convertible.
@@ -87,8 +88,16 @@ macro anycast(any v, $Type) @builtin
return ($Type*)v.ptr;
}
fn bool print_backtrace(String message, int backtraces_to_ignore) @if(env::NATIVE_STACKTRACE) => @pool()
fn bool print_backtrace(String message, int backtraces_to_ignore) @if(env::NATIVE_STACKTRACE) => @stack_mem(0x1100; Allocator smem)
{
TempAllocator* t = allocator::current_temp;
TempAllocator* new_t = allocator::new_temp_allocator(smem, 0x1000)!!;
allocator::current_temp = new_t;
defer
{
allocator::current_temp = t;
new_t.free();
}
void*[256] buffer;
void*[] backtraces = backtrace::capture_current(&buffer);
backtraces_to_ignore++;

View File

@@ -69,7 +69,8 @@ fn void DString.replace(&self, String needle, String replacement)
self.replace_char(needle[0], replacement[0]);
return;
}
@pool(data.allocator) {
@pool()
{
String str = self.tcopy_str();
self.clear();
usz len = str.len;
@@ -538,7 +539,7 @@ macro void DString.insert_at(&self, usz index, value)
fn usz? DString.appendf(&self, String format, args...) @maydiscard
{
if (!self.data()) self.tinit(format.len + 20);
@pool(self.data().allocator)
@pool()
{
Formatter formatter;
formatter.init(&out_string_append_fn, self);
@@ -549,7 +550,7 @@ fn usz? DString.appendf(&self, String format, args...) @maydiscard
fn usz? DString.appendfn(&self, String format, args...) @maydiscard
{
if (!self.data()) self.tinit(format.len + 20);
@pool(self.data().allocator)
@pool()
{
Formatter formatter;
formatter.init(&out_string_append_fn, self);

View File

@@ -555,61 +555,32 @@ macro void @stack_pool(usz $size; @body) @builtin
};
}
struct TempState
{
TempAllocator* old;
TempAllocator* current;
usz mark;
}
<*
Push the current temp allocator. A push must always be balanced with a pop using the current state.
*>
fn TempState temp_push(TempAllocator* other = null)
fn PoolState temp_push()
{
TempAllocator* current = allocator::temp();
TempAllocator* old = current;
if (other == current)
{
current = allocator::temp_allocator_next();
}
return { old, current, current.used };
return allocator::push_pool() @inline;
}
<*
Pop the current temp allocator. A pop must always be balanced with a push.
*>
fn void temp_pop(TempState old_state)
fn void temp_pop(PoolState old_state)
{
assert(allocator::thread_temp_allocator == old_state.current, "Tried to pop temp allocators out of order.");
assert(old_state.current.used >= old_state.mark, "Tried to pop temp allocators out of order.");
old_state.current.reset(old_state.mark);
allocator::thread_temp_allocator = old_state.old;
allocator::pop_pool(old_state) @inline;
}
<*
@require @is_empty_macro_slot(#other_temp) ||| $assignable(#other_temp, Allocator) : "Must be an allocator"
*>
macro void @pool(#other_temp = EMPTY_MACRO_SLOT; @body) @builtin
macro void @pool(;@body) @builtin
{
TempAllocator* current = allocator::temp();
$if @is_valid_macro_slot(#other_temp):
TempAllocator* original = current;
if (current == #other_temp.ptr) current = allocator::temp_allocator_next();
$endif
usz mark = current.used;
PoolState state = allocator::push_pool() @inline;
defer
{
current.reset(mark);
$if @is_valid_macro_slot(#other_temp):
allocator::thread_temp_allocator = original;
$endif;
allocator::pop_pool(state) @inline;
}
@body();
}
import libc;
module std::core::mem @if(WASM_NOLIBC);
import std::core::mem::allocator @public;
SimpleHeapAllocator wasm_allocator @private;
@@ -624,7 +595,6 @@ fn void initialize_wasm_mem() @init(1024) @private
wasm_allocator.init(fn (x) => allocator::wasm_memory.allocate_block(x));
allocator::thread_allocator = &wasm_allocator;
allocator::temp_base_allocator = &wasm_allocator;
allocator::init_default_temp_allocators();
}
module std::core::mem;

View File

@@ -1,5 +1,6 @@
module std::core::mem::allocator;
const DEFAULT_SIZE_PREFIX = usz.sizeof;
const DEFAULT_SIZE_PREFIX_ALIGNMENT = usz.alignof;
@@ -18,8 +19,6 @@ enum AllocInitType
interface Allocator
{
fn void reset(usz mark) @optional;
fn usz mark() @optional;
<*
@require !alignment || math::is_power_of_2(alignment)
@require alignment <= mem::MAX_MEMORY_ALIGNMENT : `alignment too big`
@@ -356,14 +355,32 @@ macro void*? @aligned_realloc(#calloc_fn, #free_fn, void* old_pointer, usz bytes
// All allocators
alias mem @builtin = thread_allocator ;
tlocal Allocator thread_allocator @private = base_allocator();
Allocator temp_base_allocator @private = base_allocator();
tlocal TempAllocator* thread_temp_allocator @private = null;
tlocal TempAllocator*[2] temp_allocator_pair @private;
typedef PoolState = void*;
tlocal TempAllocator* current_temp;
tlocal TempAllocator* top_temp;
usz temp_allocator_min_size = temp_allocator_default_min_size();
usz temp_allocator_buffer_size = temp_allocator_default_buffer_size();
usz temp_allocator_new_mult = 4;
fn PoolState push_pool()
{
TempAllocator* old = current_temp ?: create_temp_allocator();
current_temp = current_temp.derive_allocator(temp_allocator_min_size, temp_allocator_buffer_size, temp_allocator_new_mult)!!;
return (PoolState)old;
}
fn void pop_pool(PoolState old)
{
current_temp = (TempAllocator*)old;
current_temp.reset();
}
macro Allocator base_allocator() @private
{
@@ -374,46 +391,51 @@ macro Allocator base_allocator() @private
$endif
}
macro TempAllocator* create_default_sized_temp_allocator(Allocator allocator) @local
macro usz temp_allocator_size() @local
{
$switch env::MEMORY_ENV:
$case NORMAL:
return new_temp_allocator(1024 * 256, allocator)!!;
$case SMALL:
return new_temp_allocator(1024 * 16, allocator)!!;
$case TINY:
return new_temp_allocator(1024 * 2, allocator)!!;
$case NONE:
unreachable("Temp allocator must explicitly created when memory-env is set to 'none'.");
$case NORMAL: return 256 * 1024;
$case SMALL: return 1024 * 32;
$case TINY: return 1024 * 4;
$case NONE: return 0;
$endswitch
}
macro usz temp_allocator_default_min_size() @local
{
$switch env::MEMORY_ENV:
$case NORMAL: return 16 * 1024;
$case SMALL: return 1024 * 2;
$case TINY: return 256;
$case NONE: return 256;
$endswitch
}
macro usz temp_allocator_default_buffer_size() @local
{
$switch env::MEMORY_ENV:
$case NORMAL: return 1024;
$case SMALL: return 128;
$case TINY: return 64;
$case NONE: return 64;
$endswitch
}
macro Allocator heap() => thread_allocator;
macro TempAllocator* temp()
<*
@require !current_temp : "This should never be called when temp already exists"
*>
fn TempAllocator* create_temp_allocator() @private
{
if (!thread_temp_allocator)
{
init_default_temp_allocators();
}
return thread_temp_allocator;
return top_temp = current_temp = allocator::new_temp_allocator(base_allocator(), temp_allocator_size())!!;
}
macro Allocator temp()
{
return current_temp ?: create_temp_allocator();
}
macro TempAllocator* tmem() @builtin
{
if (!thread_temp_allocator)
{
init_default_temp_allocators();
}
return thread_temp_allocator;
}
fn void init_default_temp_allocators() @private
{
temp_allocator_pair[0] = create_default_sized_temp_allocator(temp_base_allocator);
temp_allocator_pair[1] = create_default_sized_temp_allocator(temp_base_allocator);
thread_temp_allocator = temp_allocator_pair[0];
}
alias tmem @builtin = temp;
fn void destroy_temp_allocators_after_exit() @finalizer(65535) @local @if(env::LIBC)
{
@@ -425,22 +447,8 @@ fn void destroy_temp_allocators_after_exit() @finalizer(65535) @local @if(env::L
*>
fn void destroy_temp_allocators()
{
if (!thread_temp_allocator) return;
temp_allocator_pair[0].destroy();
temp_allocator_pair[1].destroy();
temp_allocator_pair[..] = null;
thread_temp_allocator = null;
}
fn TempAllocator* temp_allocator_next() @private
{
if (!thread_temp_allocator)
{
init_default_temp_allocators();
return thread_temp_allocator;
}
usz index = thread_temp_allocator == temp_allocator_pair[0] ? 1 : 0;
return thread_temp_allocator = temp_allocator_pair[index];
if (!top_temp) return;
top_temp.free();
}
const NullAllocator NULL_ALLOCATOR = {};

View File

@@ -244,7 +244,7 @@ fn bool run_tests(String[] args, TestUnit[] tests) @private
name.append_repeat('-', len - len / 2);
if (!context.is_quiet_mode) io::printn(name);
name.clear();
TempState temp_state = mem::temp_push();
PoolState temp_state = mem::temp_push();
defer mem::temp_pop(temp_state);
foreach(unit : tests)
{

View File

@@ -59,7 +59,7 @@ fn ZString tformat_zstr(String fmt, args...)
@param [inout] allocator : `The allocator to use`
@param [in] fmt : `The formatting string`
*>
fn String format(Allocator allocator, String fmt, args...) => @pool(allocator)
fn String format(Allocator allocator, String fmt, args...) => @pool()
{
DString str = dstring::temp_with_capacity(fmt.len + args.len * 8);
str.appendf(fmt, ...args);
@@ -104,7 +104,7 @@ fn String join(Allocator allocator, String[] s, String joiner)
{
total_size += str.len;
}
@pool(allocator)
@pool()
{
DString res = dstring::temp_with_capacity(total_size);
res.append(s[0]);

View File

@@ -23,7 +23,7 @@ fn Object*? parse(Allocator allocator, InStream s)
@stack_mem(512; Allocator smem)
{
JsonContext context = { .last_string = dstring::new_with_capacity(smem, 64), .stream = s, .allocator = allocator };
@pool(allocator)
@pool()
{
return parse_any(&context);
};

View File

@@ -64,7 +64,7 @@ macro String? readline(Allocator allocator, stream = io::stdin())
$endif
if (val == '\n') return "";
@pool(allocator)
@pool()
{
DString str = dstring::temp_with_capacity(256);
if (val != '\r') str.append(val);

View File

@@ -29,7 +29,7 @@ fn PathList? native_ls(Path dir, bool no_dirs, bool no_symlinks, String mask, Al
PathList list;
list.init(allocator);
@pool(allocator)
@pool()
{
WString result = dir.str_view().tconcat(`\*`).to_temp_wstring()!!;
Win32_WIN32_FIND_DATAW find_data;
@@ -39,7 +39,7 @@ fn PathList? native_ls(Path dir, bool no_dirs, bool no_symlinks, String mask, Al
do
{
if (no_dirs && (find_data.dwFileAttributes & win32::FILE_ATTRIBUTE_DIRECTORY)) continue;
@pool(allocator)
@pool()
{
String filename = string::tfrom_wstring((WString)&find_data.cFileName)!;
if (filename == ".." || filename == ".") continue;

View File

@@ -11,7 +11,7 @@ fn Path? native_temp_directory(Allocator allocator) @if(!env::WIN32)
return path::new(allocator, "/tmp");
}
fn Path? native_temp_directory(Allocator allocator) @if(env::WIN32) => @pool(allocator)
fn Path? native_temp_directory(Allocator allocator) @if(env::WIN32) => @pool()
{
Win32_DWORD len = win32::getTempPathW(0, null);
if (!len) return io::GENERAL_ERROR?;

View File

@@ -28,7 +28,7 @@ enum PathEnv
fn Path? cwd(Allocator allocator)
{
@pool(allocator)
@pool()
{
return new(allocator, os::getcwd(tmem()));
};
@@ -153,7 +153,7 @@ fn Path? temp(String path, PathEnv path_env = DEFAULT_ENV)
return new(tmem(), path, path_env);
}
fn Path? from_win32_wstring(Allocator allocator, WString path) => @pool(allocator)
fn Path? from_win32_wstring(Allocator allocator, WString path) => @pool()
{
return path::new(allocator, string::tfrom_wstring(path)!);
}
@@ -183,7 +183,7 @@ fn Path? Path.append(self, Allocator allocator, String filename)
if (!self.path_string.len) return new(allocator, filename, self.env)!;
assert(!is_separator(self.path_string[^1], self.env));
@pool(allocator)
@pool()
{
DString dstr = dstring::temp_with_capacity(self.path_string.len + 1 + filename.len);
dstr.append(self.path_string);
@@ -232,7 +232,7 @@ fn bool? Path.is_absolute(self)
}
fn Path? String.to_absolute_path(self, Allocator allocator) => @pool(allocator)
fn Path? String.to_absolute_path(self, Allocator allocator) => @pool()
{
return temp(self).absolute(allocator);
}
@@ -247,14 +247,14 @@ fn Path? Path.absolute(self, Allocator allocator)
if (self.is_absolute()!) return new(allocator, path_str, self.env);
if (path_str == ".")
{
@pool(allocator)
@pool()
{
String cwd = os::getcwd(tmem())!;
return new(allocator, cwd, self.env);
};
}
$if DEFAULT_ENV == WIN32:
@pool(allocator)
@pool()
{
const usz BUFFER_LEN = 4096;
WString buffer = (WString)mem::talloc_array(Char16, BUFFER_LEN);
@@ -268,7 +268,7 @@ fn Path? Path.absolute(self, Allocator allocator)
$endif
}
fn String? String.file_basename(self, Allocator allocator) => @pool(allocator)
fn String? String.file_basename(self, Allocator allocator) => @pool()
{
return temp(self).basename().copy(allocator);
}
@@ -285,7 +285,7 @@ fn String Path.basename(self)
fn String? String.path_tdirname(self) => self.path_dirname(tmem());
fn String? String.path_dirname(self, Allocator allocator) => @pool(allocator)
fn String? String.path_dirname(self, Allocator allocator) => @pool()
{
return temp(self).dirname().copy(allocator);
}

View File

@@ -89,7 +89,7 @@ fn Url? parse(Allocator allocator, String url_string)
String userinfo = authority[:user_info_end];
String username @noinit;
String password;
@pool(allocator)
@pool()
{
String[] userpass = userinfo.tsplit(":", 2);
username = userpass[0];
@@ -115,7 +115,7 @@ fn Url? parse(Allocator allocator, String url_string)
}
else
{
@pool(allocator)
@pool()
{
String[] host_port = authority.tsplit(":", 2);
if (host_port.len > 1)
@@ -270,7 +270,7 @@ fn UrlQueryValues parse_query(Allocator allocator, String query)
Splitter raw_vals = query.tokenize("&");
while (try String rv = raw_vals.next())
{
@pool(allocator)
@pool()
{
String[] parts = rv.tsplit("=", 2);
String key = tdecode(parts[0], QUERY) ?? parts[0];

View File

@@ -64,7 +64,7 @@ fn usz encode_len(String s, UrlEncodingMode mode) @inline
@param [inout] allocator
@return "Percent-encoded String"
*>
fn String encode(Allocator allocator, String s, UrlEncodingMode mode) => @pool(allocator)
fn String encode(Allocator allocator, String s, UrlEncodingMode mode) => @pool()
{
usz n = encode_len(s, mode);
DString builder = dstring::temp_with_capacity(n);
@@ -131,7 +131,7 @@ fn usz? decode_len(String s, UrlEncodingMode mode) @inline
@param [inout] allocator
@return "Percent-decoded String"
*>
fn String? decode(Allocator allocator, String s, UrlEncodingMode mode) => @pool(allocator)
fn String? decode(Allocator allocator, String s, UrlEncodingMode mode) => @pool()
{
usz n = decode_len(s, mode)!;
DString builder = dstring::temp_with_capacity(n);

View File

@@ -9,7 +9,7 @@ import std::io::path, libc, std::os;
@require name.len > 0
@return? NOT_FOUND
*>
fn String? get_var(Allocator allocator, String name) => @pool(allocator)
fn String? get_var(Allocator allocator, String name) => @pool()
{
$switch:
@@ -81,7 +81,7 @@ fn String? get_home_dir(Allocator using = allocator::heap())
<*
Returns the current user's config directory.
*>
fn Path? get_config_dir(Allocator allocator) => @pool(allocator)
fn Path? get_config_dir(Allocator allocator) => @pool()
{
$if env::WIN32:
return path::new(allocator, tget_var("AppData"));

View File

@@ -204,7 +204,7 @@ fn void? backtrace_add_element(Allocator allocator, BacktraceList *list, void* a
return;
}
@pool(allocator)
@pool()
{
Linux_Dl_info info;
if (dladdr(addr, &info) == 0)
@@ -227,7 +227,7 @@ fn BacktraceList? symbolize_backtrace(Allocator allocator, void*[] backtrace)
}
list.free();
}
@pool(allocator)
@pool()
{
foreach (addr : backtrace)
{

View File

@@ -95,7 +95,7 @@ fn uptr? load_address() @local
fn Backtrace? backtrace_load_element(String execpath, void* buffer, void* load_address, Allocator allocator = allocator::heap()) @local
{
@pool(allocator)
@pool()
{
if (buffer)
{
@@ -145,9 +145,9 @@ fn BacktraceList? symbolize_backtrace(Allocator allocator, void*[] backtrace)
}
list.free();
}
@pool(allocator)
@pool()
{
String execpath = executable_path(allocator::temp())!;
String execpath = executable_path(tmem())!;
foreach (addr : backtrace)
{
list.push(backtrace_load_element(execpath, addr, load_addr, allocator) ?? backtrace::BACKTRACE_UNKNOWN);

View File

@@ -37,6 +37,8 @@
- Added `LANGUAGE_DEV_VERSION` env constant.
- Rename `anyfault` -> `fault`.
- `!!foo` now works the same as `! ! foo`.
- Temp allocator now supports more than 2 in-flight stacks.
- Printing stacktrace uses its own temp allocator.
### Fixes
- Fix address sanitizer to work on MachO targets (e.g. MacOS).
@@ -65,6 +67,10 @@
- `@wstring`, `@wstring32`, `@char32` and `@char16` compile time macros added.
- Updates to `Atomic` to handle distinct types and booleans.
- Added `math::iota`.
- `@pool` no longer takes an argument.
- `Allocator` interface removes `mark` and `reset`.
- `DynamicArenaAllocator` has a changed `init` function.
- Added `BackedArenaAllocator` which is allocated to a fixed size, then allocates on the backing allocator and supports mark/reset.
## 0.6.8 Change list

View File

@@ -179,17 +179,20 @@ entry:
%indirectarg10 = alloca %"any[]", align 8
store %"char[]" { ptr @.str, i64 21 }, ptr %foo_tmpl, align 8
call void @llvm.memset.p0.i64(ptr align 8 %ft, i8 0, i64 72, i1 false)
%0 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
%i2nb = icmp eq ptr %0, null
br i1 %i2nb, label %if.then, label %if.exit
%0 = load ptr, ptr @std.core.mem.allocator.current_temp, align 8
%i2b = icmp ne ptr %0, null
br i1 %i2b, label %cond.lhs, label %cond.rhs
if.then: ; preds = %entry
call void @std.core.mem.allocator.init_default_temp_allocators()
br label %if.exit
cond.lhs: ; preds = %entry
br label %cond.phi
if.exit: ; preds = %if.then, %entry
%1 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
%2 = insertvalue %any undef, ptr %1, 0
cond.rhs: ; preds = %entry
%1 = call ptr @std.core.mem.allocator.create_temp_allocator()
br label %cond.phi
cond.phi: ; preds = %cond.rhs, %cond.lhs
%val = phi ptr [ %0, %cond.lhs ], [ %1, %cond.rhs ]
%2 = insertvalue %any undef, ptr %val, 0
%3 = insertvalue %any %2, i64 ptrtoint (ptr @"$ct.std.core.mem.allocator.TempAllocator" to i64), 1
%lo = load ptr, ptr %foo_tmpl, align 8
%ptradd = getelementptr inbounds i8, ptr %foo_tmpl, i64 8
@@ -201,11 +204,11 @@ if.exit: ; preds = %if.then, %entry
%5 = call i1 @llvm.expect.i1(i1 %not_err, i1 true)
br i1 %5, label %after_check, label %assign_optional
assign_optional: ; preds = %if.exit
assign_optional: ; preds = %cond.phi
store i64 %4, ptr %error_var, align 8
br label %panic_block
after_check: ; preds = %if.exit
after_check: ; preds = %cond.phi
br label %noerr_block
panic_block: ; preds = %assign_optional

View File

@@ -17,8 +17,7 @@ fn void main()
/* #expect: test.ll
entry:
%current = alloca ptr, align 8
%mark = alloca i64, align 8
%state = alloca ptr, align 8
%s = alloca %"char[]", align 8
%varargslots = alloca [1 x %any], align 16
%taddr = alloca i32, align 4
@@ -29,94 +28,80 @@ entry:
%error_var = alloca i64, align 8
%x2 = alloca %"char[]", align 8
%retparam = alloca i64, align 8
%error_var5 = alloca i64, align 8
%error_var11 = alloca i64, align 8
%1 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
%i2nb = icmp eq ptr %1, null
br i1 %i2nb, label %if.then, label %if.exit
if.then: ; preds = %entry
call void @std.core.mem.allocator.init_default_temp_allocators()
br label %if.exit
if.exit: ; preds = %if.then, %entry
%2 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
store ptr %2, ptr %current, align 8
%3 = load ptr, ptr %current, align 8
%ptradd = getelementptr inbounds i8, ptr %3, i64 24
%4 = load i64, ptr %ptradd, align 8
store i64 %4, ptr %mark, align 8
%error_var4 = alloca i64, align 8
%error_var10 = alloca i64, align 8
%1 = call ptr @std.core.mem.allocator.push_pool() #3
store ptr %1, ptr %state, align 8
store i32 %0, ptr %taddr, align 4
%5 = insertvalue %any undef, ptr %taddr, 0
%6 = insertvalue %any %5, i64 ptrtoint (ptr @"$ct.int" to i64), 1
store %any %6, ptr %varargslots, align 16
%7 = call { ptr, i64 } @std.core.string.tformat(ptr @.str, i64 2, ptr %varargslots, i64 1)
store { ptr, i64 } %7, ptr %result, align 8
%2 = insertvalue %any undef, ptr %taddr, 0
%3 = insertvalue %any %2, i64 ptrtoint (ptr @"$ct.int" to i64), 1
store %any %3, ptr %varargslots, align 16
%4 = call { ptr, i64 } @std.core.string.tformat(ptr @.str, i64 2, ptr %varargslots, i64 1)
store { ptr, i64 } %4, ptr %result, align 8
call void @llvm.memcpy.p0.p0.i32(ptr align 8 %s, ptr align 8 %result, i32 16, i1 false)
call void @llvm.memcpy.p0.p0.i32(ptr align 8 %x, ptr align 8 %s, i32 16, i1 false)
%8 = call ptr @std.io.stdout()
%5 = call ptr @std.io.stdout()
call void @llvm.memcpy.p0.p0.i32(ptr align 8 %x1, ptr align 8 %x, i32 16, i1 false)
call void @llvm.memcpy.p0.p0.i32(ptr align 8 %x2, ptr align 8 %x1, i32 16, i1 false)
%lo = load ptr, ptr %x2, align 8
%ptradd4 = getelementptr inbounds i8, ptr %x2, i64 8
%hi = load i64, ptr %ptradd4, align 8
%9 = call i64 @std.io.File.write(ptr %retparam, ptr %8, ptr %lo, i64 %hi)
%not_err = icmp eq i64 %9, 0
%10 = call i1 @llvm.expect.i1(i1 %not_err, i1 true)
br i1 %10, label %after_check, label %assign_optional
%ptradd = getelementptr inbounds i8, ptr %x2, i64 8
%hi = load i64, ptr %ptradd, align 8
%6 = call i64 @std.io.File.write(ptr %retparam, ptr %5, ptr %lo, i64 %hi)
%not_err = icmp eq i64 %6, 0
%7 = call i1 @llvm.expect.i1(i1 %not_err, i1 true)
br i1 %7, label %after_check, label %assign_optional
assign_optional: ; preds = %if.exit
store i64 %9, ptr %error_var, align 8
assign_optional: ; preds = %entry
store i64 %6, ptr %error_var, align 8
br label %guard_block
after_check: ; preds = %if.exit
after_check: ; preds = %entry
br label %noerr_block
guard_block: ; preds = %assign_optional
br label %voiderr
noerr_block: ; preds = %after_check
%11 = load i64, ptr %retparam, align 8
store i64 %11, ptr %len, align 8
%12 = call i64 @std.io.File.write_byte(ptr %8, i8 zeroext 10)
%not_err6 = icmp eq i64 %12, 0
%13 = call i1 @llvm.expect.i1(i1 %not_err6, i1 true)
br i1 %13, label %after_check8, label %assign_optional7
%8 = load i64, ptr %retparam, align 8
store i64 %8, ptr %len, align 8
%9 = call i64 @std.io.File.write_byte(ptr %5, i8 zeroext 10)
%not_err5 = icmp eq i64 %9, 0
%10 = call i1 @llvm.expect.i1(i1 %not_err5, i1 true)
br i1 %10, label %after_check7, label %assign_optional6
assign_optional7: ; preds = %noerr_block
store i64 %12, ptr %error_var5, align 8
br label %guard_block9
assign_optional6: ; preds = %noerr_block
store i64 %9, ptr %error_var4, align 8
br label %guard_block8
after_check8: ; preds = %noerr_block
br label %noerr_block10
after_check7: ; preds = %noerr_block
br label %noerr_block9
guard_block9: ; preds = %assign_optional7
guard_block8: ; preds = %assign_optional6
br label %voiderr
noerr_block10: ; preds = %after_check8
%14 = call i64 @std.io.File.flush(ptr %8)
%not_err12 = icmp eq i64 %14, 0
%15 = call i1 @llvm.expect.i1(i1 %not_err12, i1 true)
br i1 %15, label %after_check14, label %assign_optional13
noerr_block9: ; preds = %after_check7
%11 = call i64 @std.io.File.flush(ptr %5)
%not_err11 = icmp eq i64 %11, 0
%12 = call i1 @llvm.expect.i1(i1 %not_err11, i1 true)
br i1 %12, label %after_check13, label %assign_optional12
assign_optional13: ; preds = %noerr_block10
store i64 %14, ptr %error_var11, align 8
br label %guard_block15
assign_optional12: ; preds = %noerr_block9
store i64 %11, ptr %error_var10, align 8
br label %guard_block14
after_check14: ; preds = %noerr_block10
br label %noerr_block16
after_check13: ; preds = %noerr_block9
br label %noerr_block15
guard_block15: ; preds = %assign_optional13
guard_block14: ; preds = %assign_optional12
br label %voiderr
noerr_block16: ; preds = %after_check14
%16 = load i64, ptr %len, align 8
%add = add i64 %16, 1
noerr_block15: ; preds = %after_check13
%13 = load i64, ptr %len, align 8
%add = add i64 %13, 1
br label %voiderr
voiderr: ; preds = %noerr_block16, %guard_block15, %guard_block9, %guard_block
%17 = load ptr, ptr %current, align 8
%18 = load i64, ptr %mark, align 8
call void @std.core.mem.allocator.TempAllocator.reset(ptr %17, i64 %18)
voiderr: ; preds = %noerr_block15, %guard_block14, %guard_block8, %guard_block
%14 = load ptr, ptr %state, align 8
call void @std.core.mem.allocator.pop_pool(ptr %14) #3
ret i32 2
}

View File

@@ -123,12 +123,11 @@ entry:
%varargslots57 = alloca [1 x %any], align 16
%result60 = alloca %"double[]", align 8
%retparam61 = alloca i64, align 8
%current = alloca ptr, align 8
%mark = alloca i64, align 8
%state = alloca ptr, align 8
%map3 = alloca %HashMap.0, align 8
%varargslots67 = alloca [1 x %any], align 16
%result70 = alloca %"int[]", align 8
%retparam71 = alloca i64, align 8
%varargslots66 = alloca [1 x %any], align 16
%result69 = alloca %"int[]", align 8
%retparam70 = alloca i64, align 8
call void @llvm.memset.p0.i64(ptr align 8 %map, i8 0, i64 48, i1 false)
%lo = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
@@ -232,38 +231,24 @@ after_check18: ; preds = %entry, %after_check
%48 = insertvalue %any %47, i64 ptrtoint (ptr @"$ct.sa$double" to i64), 1
store %any %48, ptr %varargslots57, align 16
%49 = call i64 @std.io.printfn(ptr %retparam61, ptr @.str.12, i64 2, ptr %varargslots57, i64 1)
%50 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
%i2nb = icmp eq ptr %50, null
br i1 %i2nb, label %if.then, label %if.exit
if.then: ; preds = %after_check18
call void @std.core.mem.allocator.init_default_temp_allocators()
br label %if.exit
if.exit: ; preds = %if.then, %after_check18
%51 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
store ptr %51, ptr %current, align 8
%52 = load ptr, ptr %current, align 8
%ptradd64 = getelementptr inbounds i8, ptr %52, i64 24
%53 = load i64, ptr %ptradd64, align 8
store i64 %53, ptr %mark, align 8
%50 = call ptr @std.core.mem.allocator.push_pool() #4
store ptr %50, ptr %state, align 8
call void @llvm.memset.p0.i64(ptr align 8 %map3, i8 0, i64 48, i1 false)
%lo65 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi66 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%54 = call ptr @"std_collections_map$int$double$.HashMap.init"(ptr %map3, i64 %lo65, ptr %hi66, i32 16, float 7.500000e-01)
%55 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 5, double 3.200000e+00)
%56 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 7, double 5.200000e+00)
%lo68 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi69 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%57 = call { ptr, i64 } @"std_collections_map$int$double$.HashMap.keys"(ptr %map3, i64 %lo68, ptr %hi69)
store { ptr, i64 } %57, ptr %result70, align 8
%58 = insertvalue %any undef, ptr %result70, 0
%59 = insertvalue %any %58, i64 ptrtoint (ptr @"$ct.sa$int" to i64), 1
store %any %59, ptr %varargslots67, align 16
%60 = call i64 @std.io.printfn(ptr %retparam71, ptr @.str.13, i64 2, ptr %varargslots67, i64 1)
%61 = load ptr, ptr %current, align 8
%62 = load i64, ptr %mark, align 8
call void @std.core.mem.allocator.TempAllocator.reset(ptr %61, i64 %62)
%lo64 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi65 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%51 = call ptr @"std_collections_map$int$double$.HashMap.init"(ptr %map3, i64 %lo64, ptr %hi65, i32 16, float 7.500000e-01)
%52 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 5, double 3.200000e+00)
%53 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 7, double 5.200000e+00)
%lo67 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi68 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%54 = call { ptr, i64 } @"std_collections_map$int$double$.HashMap.keys"(ptr %map3, i64 %lo67, ptr %hi68)
store { ptr, i64 } %54, ptr %result69, align 8
%55 = insertvalue %any undef, ptr %result69, 0
%56 = insertvalue %any %55, i64 ptrtoint (ptr @"$ct.sa$int" to i64), 1
store %any %56, ptr %varargslots66, align 16
%57 = call i64 @std.io.printfn(ptr %retparam70, ptr @.str.13, i64 2, ptr %varargslots66, i64 1)
%58 = load ptr, ptr %state, align 8
call void @std.core.mem.allocator.pop_pool(ptr %58) #4
ret void
}

View File

@@ -123,12 +123,11 @@ entry:
%varargslots57 = alloca [1 x %any], align 16
%result60 = alloca %"double[]", align 8
%retparam61 = alloca i64, align 8
%current = alloca ptr, align 8
%mark = alloca i64, align 8
%state = alloca ptr, align 8
%map3 = alloca %HashMap.0, align 8
%varargslots67 = alloca [1 x %any], align 16
%result70 = alloca %"int[]", align 8
%retparam71 = alloca i64, align 8
%varargslots66 = alloca [1 x %any], align 16
%result69 = alloca %"int[]", align 8
%retparam70 = alloca i64, align 8
call void @llvm.memset.p0.i64(ptr align 8 %map, i8 0, i64 48, i1 false)
%lo = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
@@ -232,37 +231,23 @@ after_check18: ; preds = %entry, %after_check
%48 = insertvalue %any %47, i64 ptrtoint (ptr @"$ct.sa$double" to i64), 1
store %any %48, ptr %varargslots57, align 16
%49 = call i64 @std.io.printfn(ptr %retparam61, ptr @.str.12, i64 2, ptr %varargslots57, i64 1)
%50 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
%i2nb = icmp eq ptr %50, null
br i1 %i2nb, label %if.then, label %if.exit
if.then: ; preds = %after_check18
call void @std.core.mem.allocator.init_default_temp_allocators()
br label %if.exit
if.exit: ; preds = %if.then, %after_check18
%51 = load ptr, ptr @std.core.mem.allocator.thread_temp_allocator, align 8
store ptr %51, ptr %current, align 8
%52 = load ptr, ptr %current, align 8
%ptradd64 = getelementptr inbounds i8, ptr %52, i64 24
%53 = load i64, ptr %ptradd64, align 8
store i64 %53, ptr %mark, align 8
%50 = call ptr @std.core.mem.allocator.push_pool() #4
store ptr %50, ptr %state, align 8
call void @llvm.memset.p0.i64(ptr align 8 %map3, i8 0, i64 48, i1 false)
%lo65 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi66 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%54 = call ptr @"std_collections_map$int$double$.HashMap.init"(ptr %map3, i64 %lo65, ptr %hi66, i32 16, float 7.500000e-01)
%55 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 5, double 3.200000e+00)
%56 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 7, double 5.200000e+00)
%lo68 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi69 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%57 = call { ptr, i64 } @"std_collections_map$int$double$.HashMap.keys"(ptr %map3, i64 %lo68, ptr %hi69)
store { ptr, i64 } %57, ptr %result70, align 8
%58 = insertvalue %any undef, ptr %result70, 0
%59 = insertvalue %any %58, i64 ptrtoint (ptr @"$ct.sa$int" to i64), 1
store %any %59, ptr %varargslots67, align 16
%60 = call i64 @std.io.printfn(ptr %retparam71, ptr @.str.13, i64 2, ptr %varargslots67, i64 1)
%61 = load ptr, ptr %current, align 8
%62 = load i64, ptr %mark, align 8
call void @std.core.mem.allocator.TempAllocator.reset(ptr %61, i64 %62)
%lo64 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi65 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%51 = call ptr @"std_collections_map$int$double$.HashMap.init"(ptr %map3, i64 %lo64, ptr %hi65, i32 16, float 7.500000e-01)
%52 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 5, double 3.200000e+00)
%53 = call i8 @"std_collections_map$int$double$.HashMap.set"(ptr %map3, i32 7, double 5.200000e+00)
%lo67 = load i64, ptr @std.core.mem.allocator.thread_allocator, align 8
%hi68 = load ptr, ptr getelementptr inbounds (i8, ptr @std.core.mem.allocator.thread_allocator, i64 8), align 8
%54 = call { ptr, i64 } @"std_collections_map$int$double$.HashMap.keys"(ptr %map3, i64 %lo67, ptr %hi68)
store { ptr, i64 } %54, ptr %result69, align 8
%55 = insertvalue %any undef, ptr %result69, 0
%56 = insertvalue %any %55, i64 ptrtoint (ptr @"$ct.sa$int" to i64), 1
store %any %56, ptr %varargslots66, align 16
%57 = call i64 @std.io.printfn(ptr %retparam70, ptr @.str.13, i64 2, ptr %varargslots66, i64 1)
%58 = load ptr, ptr %state, align 8
call void @std.core.mem.allocator.pop_pool(ptr %58) #4
ret void
}

View File

@@ -4,7 +4,7 @@ fn String add(String s, Allocator a, int x)
{
if (x < 0) return s.copy(a);
String tmp;
@pool(a)
@pool()
{
tmp = "foo".tconcat(s);
tmp = add(tmp, a, x - 1);
@@ -16,7 +16,7 @@ fn String add(String s, Allocator a, int x)
fn String breakit(String s, Allocator a)
{
@pool(a)
@pool()
{
return inner2("foo".concat(tmem(), s), a);
};
@@ -24,7 +24,7 @@ fn String breakit(String s, Allocator a)
fn String inner2(String s, Allocator a)
{
@pool(a)
@pool()
{
ulong* z1 = mem::talloc(ulong);
*z1 = 0xAAAA_AAAA_AAAA_AAAA;
@@ -37,7 +37,7 @@ fn String inner2(String s, Allocator a)
fn String inner3(String s, Allocator a)
{
@pool(a)
@pool()
{
ulong* z1 = mem::talloc(ulong);
*z1 = 0xAAAA_AAAA_AAAA_AAAA;
@@ -50,7 +50,7 @@ fn String inner3(String s, Allocator a)
fn String inner4(String s, Allocator a)
{
@pool(a)
@pool()
{
String y = s.concat(tmem(), "xy**********").copy(a);
return y;