0.5.4: Hash variables accept designated initializers. @safemacro overrides the need for @ in macro names. Fixes to macro context evaluation. Updated allocator api. Removed install_win_reqs.bat. Deterministic @init for MacOS. Fixed temp memory issue with formatter. Support LLVM 19. Add support to compare bitstructs using == and !=. Support Windows .def files. Removed invalid grammar from grammar.y. Support compile time folding of &|^~ for bitstructs. output project setting now respected. Fix issue where constants were not properly constant folded. Add temp_push/pop. Aliased declarations caused errors when used in initializers. Fix export output. Fix of const ternary #1118. Fix of $$MODULE in nested macros #1117. Fix debug info on globals. out now correctly detects subscript[] use #1116. Lateral implicit imports removed. Default to '.' if no libdir is specified. Improved error messages for --lib. Fix raylib snake example. Overzealous local escape check corrected #1127. Improved yacc grammar #1128. --linker argument #1067. Fixes to the matrix operations #1130. Added GenericList.

This commit is contained in:
Christoffer Lerno
2024-01-16 00:16:29 +01:00
committed by Christoffer Lerno
parent c673101bbb
commit 748c737e8f
151 changed files with 3991 additions and 1687 deletions

View File

@@ -86,7 +86,7 @@ struct GrowableBitSet
* @param initial_capacity
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
**/
fn GrowableBitSet* GrowableBitSet.new_init(&self, usz initial_capacity = 1, Allocator* allocator = mem::heap())
fn GrowableBitSet* GrowableBitSet.new_init(&self, usz initial_capacity = 1, Allocator* allocator = allocator::heap())
{
self.data.new_init(initial_capacity, allocator);
return self;
@@ -96,14 +96,14 @@ fn GrowableBitSet* GrowableBitSet.new_init(&self, usz initial_capacity = 1, Allo
* @param initial_capacity
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
**/
fn GrowableBitSet* GrowableBitSet.init_new(&self, usz initial_capacity = 1, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn GrowableBitSet* GrowableBitSet.init_new(&self, usz initial_capacity = 1, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(initial_capacity, allocator) @inline;
}
fn GrowableBitSet* GrowableBitSet.temp_init(&self, usz initial_capacity = 1)
{
return self.new_init(initial_capacity, mem::temp()) @inline;
return self.new_init(initial_capacity, allocator::temp()) @inline;
}
fn GrowableBitSet* GrowableBitSet.init_temp(&self, usz initial_capacity = 1) @deprecated("Replaced by temp_init")

View File

@@ -1,5 +1,5 @@
module std::collections::enummap(<Enum, ValueType>);
import std::io;
struct EnumMap (Printable)
{
ValueType[Enum.len] values;
@@ -25,7 +25,7 @@ fn usz! EnumMap.to_format(&self, Formatter* formatter) @dynamic
return n;
}
fn String EnumMap.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String EnumMap.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return string::new_format("%s", *self, .allocator = allocator);
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of self source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
@@ -6,8 +6,9 @@
* @require Enum.kindof == TypeKind.ENUM : "Only enums maybe be used with an enumset"
**/
module std::collections::enumset(<Enum>);
import std::io;
def EnumSetType = $typefrom(private::type_for_enum_elements(Enum.elements)) @private ;
def EnumSetType = $typefrom(private::type_for_enum_elements(Enum.elements)) @private;
const IS_CHAR_ARRAY = Enum.elements > 128;
distinct EnumSet (Printable) = EnumSetType;
@@ -140,7 +141,7 @@ fn usz! EnumSet.to_format(&set, Formatter* formatter) @dynamic
return n;
}
fn String EnumSet.to_new_string(&set, Allocator* allocator = mem::heap()) @dynamic
fn String EnumSet.to_new_string(&set, Allocator* allocator = allocator::heap()) @dynamic
{
return string::new_format("%s", *set, .allocator = allocator);
}

View File

@@ -0,0 +1,467 @@
// Copyright (c) 2024 Christoffer Lerno. All rights reserved.
// Use of self source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::collections::generic_list;
import std::io,std::math;
// Predicate applied to a stored element; true selects the element.
def GenericPredicate = fn bool(any* value);
// Predicate applied to a stored element together with a caller-supplied context.
def GenericTest = fn bool(any* type, any* context);

// A dynamically sized list of type-erased `any*` values. Elements pushed
// through push()/set() are cloned into the list's allocator, so the list
// owns its elements and frees them in clear()/remove_at()/free().
struct GenericList (Printable)
{
	// Number of live elements.
	usz size;
	// Allocated slots in `entries`.
	usz capacity;
	// Backing allocator; set lazily to allocator::heap() on first growth if unset.
	Allocator* allocator;
	// Backing array of owned elements.
	any** entries;
}
/**
 * Initialize the list, rounding the requested capacity up to a power of two.
 * A capacity of 0 defers allocation until the first push.
 *
 * @param initial_capacity "The initial capacity to reserve"
 * @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
 * @return "self, for chaining"
 **/
fn GenericList* GenericList.new_init(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap())
{
	self.allocator = allocator;
	self.size = 0;
	if (initial_capacity > 0)
	{
		// Power-of-two capacities keep the doubling growth strategy aligned.
		initial_capacity = math::next_power_of_2(initial_capacity);
		self.entries = allocator::alloc_array(allocator, any*, initial_capacity);
	}
	else
	{
		self.entries = null;
	}
	self.capacity = initial_capacity;
	return self;
}
/**
 * Deprecated alias kept for backwards compatibility; forwards to new_init().
 *
 * @param initial_capacity "The initial capacity to reserve"
 * @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
 **/
fn GenericList* GenericList.init_new(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
	return self.new_init(initial_capacity, allocator) @inline;
}
/**
 * Initialize the list using the temp allocator.
 *
 * @param initial_capacity "The initial capacity to reserve"
 * @return "self, for chaining"
 **/
fn GenericList* GenericList.temp_init(&self, usz initial_capacity = 16)
{
	return self.new_init(initial_capacity, allocator::temp()) @inline;
}
/**
 * Print the list as "[a, b, c]" through the formatter (Printable support).
 * Empty and single-element lists take dedicated fast paths.
 *
 * @return "the number of bytes written"
 **/
fn usz! GenericList.to_format(&self, Formatter* formatter) @dynamic
{
	switch (self.size)
	{
		case 0:
			return formatter.print("[]")!;
		case 1:
			return formatter.printf("[%s]", self.entries[0])!;
		default:
			usz n = formatter.print("[")!;
			foreach (i, element : self.entries[:self.size])
			{
				// NOTE(review): the separator's byte count is discarded here,
				// so n under-reports the total written — confirm intended.
				if (i != 0) formatter.print(", ")!;
				n += formatter.printf("%s", element)!;
			}
			n += formatter.print("]")!;
			return n;
	}
}
/**
 * Render the list to a newly allocated string using its Printable format.
 *
 * @param [&inout] allocator "The allocator for the returned string, defaults to the heap allocator"
 **/
fn String GenericList.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
	return string::new_format("%s", *self, .allocator = allocator);
}
/**
 * Render the list to a string allocated on the temp allocator.
 **/
fn String GenericList.to_tstring(&self)
{
	return string::tformat("%s", *self);
}
/**
 * Push an element on the list by cloning it into the list's allocator,
 * so the list owns the stored copy.
 **/
macro void GenericList.push(&self, element)
{
	// Lazily bind the heap allocator for lists that were never initialized.
	if (!self.allocator) self.allocator = allocator::heap();
	self.append_internal(allocator::clone(self.allocator, element));
}
// Append an already-owned element, growing the backing array if needed.
fn void GenericList.append_internal(&self, any* element) @local
{
	self.ensure_capacity();
	self.entries[self.size++] = element;
}
/**
 * Free a retained element removed using *_retained.
 * Releases the payload behind the `any` through the list's allocator.
 **/
fn void GenericList.free_element(&self, any* element) @inline
{
	allocator::free(self.allocator, element.ptr);
}
/**
 * Pop a value who's type is known. If the type is incorrect, this
 * will still pop the element.
 *
 * @return! CastResult.TYPE_MISMATCH, IteratorResult.NO_MORE_ELEMENT
 **/
macro GenericList.pop(&self, $Type)
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	// The return expression decrements size first, so by the time the defer
	// runs, entries[self.size] is the popped element — freed only after its
	// value has been copied out (or the anycast has failed).
	defer self.free_element(self.entries[self.size]);
	return *anycast(self.entries[--self.size], $Type);
}
/**
 * Pop the last value and allocate the copy using the given allocator.
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.new_pop(&self, Allocator* allocator = allocator::heap())
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	// size is decremented inside the return expression, so at defer time
	// entries[self.size] is the popped original, freed after it was cloned.
	defer self.free_element(self.entries[self.size]);
	return allocator::clone_any(allocator, self.entries[--self.size]);
}
/**
 * Pop the last value and allocate the copy using the temp allocator
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.temp_pop(&self) => self.new_pop(allocator::temp());
/**
 * Pop the last value. It must later be released using list.free_element()
 * — ownership transfers to the caller, nothing is freed here.
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.pop_retained(&self)
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	return self.entries[--self.size];
}
/**
 * Remove every element, releasing each one through the list's allocator.
 * The backing array and its capacity are kept for reuse.
 **/
fn void GenericList.clear(&self)
{
	foreach (entry : self.entries[:self.size])
	{
		self.free_element(entry);
	}
	self.size = 0;
}
/**
 * Same as pop() but pops the first value instead.
 * The value is copied out first; the deferred remove_at(0) then frees the
 * stored element and shifts the remainder down — even if the anycast fails,
 * matching pop()'s "still pops on type mismatch" behaviour.
 **/
macro GenericList.pop_first(&self, $Type)
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	defer self.remove_at(0);
	return *anycast(self.entries[0], $Type);
}
/**
 * Same as pop_retained() but pops the first value instead.
 * Ownership transfers to the caller, who must release the element with
 * list.free_element().
 *
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.pop_first_retained(&self)
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	any* value = self.entries[0];
	// Shift the remaining elements down WITHOUT freeing the popped one:
	// the previous `defer self.remove_at(0)` freed the element it had just
	// returned, handing the caller a dangling pointer.
	if (--self.size) self.entries[0 .. self.size - 1] = self.entries[1 .. self.size];
	return value;
}
/**
 * Same as new_pop() but pops the first value instead.
 * The first element is cloned with the given allocator, then the deferred
 * remove_at(0) releases the stored original and shifts the rest down.
 *
 * @param [&inout] allocator "The allocator for the returned copy, defaults to the heap allocator"
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.new_pop_first(&self, Allocator* allocator = allocator::heap())
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	// The old extra `defer self.free_element(self.entries[self.size])` was
	// copied from new_pop(), where size is pre-decremented in the return
	// expression. Here size is untouched, so it read one slot past the end
	// and freed the stale duplicate of the last element left by the shift —
	// an out-of-bounds access and a double free. remove_at(0) already frees
	// the popped original.
	defer self.remove_at(0);
	return allocator::clone_any(allocator, self.entries[0]);
}
/**
 * Same as temp_pop() but pops the first value instead.
 * The returned copy lives on the temp allocator.
 **/
fn any*! GenericList.temp_pop_first(&self) => self.new_pop_first(allocator::temp());
/**
 * Remove and free the element at `index`, shifting later elements down.
 *
 * @require index < self.size
 **/
fn void GenericList.remove_at(&self, usz index)
{
	// Free unconditionally: the previous version returned early when the
	// removed element was the last one (or the only one), skipping the free
	// and leaking the element.
	self.free_element(self.entries[index]);
	// No shift needed when the list became empty or the tail was removed.
	if (!--self.size || index == self.size) return;
	self.entries[index .. self.size - 1] = self.entries[index + 1 .. self.size];
}
/**
 * Append clones of all elements in `other_list` to this list.
 *
 * @param [&in] other_list "The list whose elements are cloned into self"
 **/
fn void GenericList.add_all(&self, GenericList* other_list)
{
	if (!other_list.size) return;
	// Reserve room for the COMBINED size: reserving only other_list.size
	// overflowed the backing array whenever this list was already non-empty.
	// reserve() also lazily binds the heap allocator if none is set.
	self.reserve(self.size + other_list.size);
	foreach (value : other_list)
	{
		self.entries[self.size++] = allocator::clone_any(self.allocator, value);
	}
}
/**
 * Reverse the elements in a list in place by swapping pairs that close in
 * from both ends. Lists with fewer than two elements are left untouched.
 **/
fn void GenericList.reverse(&self)
{
	if (self.size < 2) return;
	usz lo = 0;
	usz hi = self.size - 1;
	while (lo < hi)
	{
		self.swap(lo, hi);
		lo++;
		hi--;
	}
}
/**
 * Return a slice over the live elements. The view borrows the backing
 * array: it is invalidated by any operation that grows or frees the list.
 **/
fn any*[] GenericList.array_view(&self)
{
	return self.entries[:self.size];
}
/**
 * Push an element to the front of the list (clones it, shifting all
 * existing elements up by one).
 **/
macro void GenericList.push_front(&self, type)
{
	self.insert_at(0, type);
}
/**
 * Insert a clone of `type` at `index`, shifting later elements up.
 * Inserting at index == size appends.
 *
 * @require index <= self.size
 **/
macro void GenericList.insert_at(&self, usz index, type) @local
{
	// Lazily bind the heap allocator, as push() does — push_front() on an
	// uninitialized list would otherwise copy through a null allocator.
	if (!self.allocator) self.allocator = allocator::heap();
	// NOTE(review): push() clones via allocator::clone while this uses
	// allocator::copy — confirm both helpers exist and are equivalent.
	any* value = allocator::copy(self.allocator, type);
	// The old call `self.insert_at_internal(self, index, value)` passed self
	// twice (method-call syntax already supplies it), a wrong-arity bug.
	// The old `@require index < self.size` also rejected inserting into an
	// empty list, breaking push_front() on a fresh list.
	self.insert_at_internal(index, value);
}
/**
 * Place an already-owned element at `index`, shifting later elements up.
 * index == self.size appends (the shift loop then does no iterations).
 *
 * @require index <= self.size
 **/
fn void GenericList.insert_at_internal(&self, usz index, any* value) @local
{
	self.ensure_capacity();
	// Shift [index, size) up one slot, from the tail down.
	for (usz i = self.size; i > index; i--)
	{
		self.entries[i] = self.entries[i - 1];
	}
	self.size++;
	self.entries[index] = value;
}
/**
 * Remove and free the last element.
 *
 * @require self.size > 0
 **/
fn void GenericList.remove_last(&self)
{
	self.free_element(self.entries[--self.size]);
}
/**
 * Remove and free the first element, shifting the rest down.
 *
 * @require self.size > 0
 **/
fn void GenericList.remove_first(&self)
{
	self.remove_at(0);
}
/**
 * Return the first element cast to $Type by value.
 *
 * @return! CastResult.TYPE_MISMATCH, IteratorResult.NO_MORE_ELEMENT
 **/
macro GenericList.first(&self, $Type)
{
	return *anycast(self.first_any(), $Type);
}
/**
 * Return the first element without removing it.
 *
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.first_any(&self) @inline
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	return self.entries[0];
}
/**
 * Return the last element cast to $Type by value.
 *
 * @return! CastResult.TYPE_MISMATCH, IteratorResult.NO_MORE_ELEMENT
 **/
macro GenericList.last(&self, $Type)
{
	return *anycast(self.last_any(), $Type);
}
/**
 * Return the last element without removing it.
 *
 * @return! IteratorResult.NO_MORE_ELEMENT
 **/
fn any*! GenericList.last_any(&self) @inline
{
	if (!self.size) return IteratorResult.NO_MORE_ELEMENT?;
	return self.entries[self.size - 1];
}
/**
 * True when the list holds no elements.
 **/
fn bool GenericList.is_empty(&self) @inline
{
	return self.size == 0;
}
/**
 * Number of elements; backs the built-in len operator for foreach/slicing.
 **/
fn usz GenericList.len(&self) @operator(len) @inline
{
	return self.size;
}
/**
 * Return the element at `index` cast to $Type by value.
 *
 * @require index < self.size "Index out of range"
 * @return! CastResult.TYPE_MISMATCH
 **/
macro GenericList.get(&self, usz index, $Type)
{
	return *anycast(self.entries[index], $Type);
}
/**
 * Return the element at `index` without removing it; the list keeps ownership.
 *
 * @require index < self.size "Index out of range"
 **/
fn any* GenericList.get_any(&self, usz index) @inline
{
	return self.entries[index];
}
/**
 * Free all elements and the backing array. Safe to call on a list that was
 * never initialized (allocator unset). The clear() call resets size to 0.
 **/
fn void GenericList.free(&self)
{
	if (!self.allocator) return;
	self.clear();
	allocator::free(self.allocator, self.entries);
	self.capacity = 0;
	self.entries = null;
}
/**
 * Exchange the elements at positions i and j.
 **/
fn void GenericList.swap(&self, usz i, usz j)
{
	any* entry_i = self.entries[i];
	any* entry_j = self.entries[j];
	self.entries[i] = entry_j;
	self.entries[j] = entry_i;
}
/**
 * Remove (and free) every element the filter matches.
 *
 * @param filter "The function to determine if it should be removed or not"
 * @return "the number of deleted elements"
 **/
fn usz GenericList.remove_if(&self, GenericPredicate filter)
{
	return self._remove_if(filter, false);
}
/**
 * Keep only the elements the selection matches; the rest are removed and freed.
 *
 * @param selection "The function to determine if it should be kept or not"
 * @return "the number of deleted elements"
 **/
fn usz GenericList.retain_if(&self, GenericPredicate selection)
{
	return self._remove_if(selection, true);
}
/**
 * Shared implementation of remove_if()/retain_if(). Scans from the tail so
 * each contiguous run of doomed elements is freed and closed with a single
 * slice move. With $invert == true the filter selects survivors instead.
 *
 * @return "the number of deleted elements"
 **/
macro usz GenericList._remove_if(&self, GenericPredicate filter, bool $invert) @local
{
	usz size = self.size;
	// i walks down to the start of a doomed run; k marks one past its end.
	for (usz i = size, usz k = size; k > 0; k = i)
	{
		// Find last index of item to be deleted.
		$if $invert:
			while (i > 0 && !filter(&self.entries[i - 1])) i--;
		$else
			while (i > 0 && filter(&self.entries[i - 1])) i--;
		$endif
		// Remove the items from this index up to the one not to be deleted.
		usz n = self.size - k;
		for (usz j = i; j < k; j++) self.free_element(self.entries[j]);
		// Close the gap by moving the n surviving elements after k down to i.
		self.entries[i:n] = self.entries[k:n];
		self.size -= k - i;
		// Find last index of item not to be deleted.
		$if $invert:
			while (i > 0 && filter(&self.entries[i - 1])) i--;
		$else
			while (i > 0 && !filter(&self.entries[i - 1])) i--;
		$endif
	}
	return size - self.size;
}
/**
 * Remove (and free) every element the filter matches, passing `context`
 * through to the filter on each call.
 *
 * @return "the number of deleted elements"
 **/
fn usz GenericList.remove_using_test(&self, GenericTest filter, any* context)
{
	return self._remove_using_test(filter, false, context);
}
/**
 * Keep only the elements the filter matches (context passed through);
 * the rest are removed and freed.
 *
 * @return "the number of deleted elements"
 **/
fn usz GenericList.retain_using_test(&self, GenericTest filter, any* context)
{
	return self._remove_using_test(filter, true, context);
}
/**
 * Context-carrying variant of _remove_if(): same tail-first batched removal,
 * but the predicate also receives `ctx`. With $invert == true the filter
 * selects survivors instead of victims.
 *
 * @return "the number of deleted elements"
 **/
macro usz GenericList._remove_using_test(&self, GenericTest filter, bool $invert, ctx) @local
{
	usz size = self.size;
	// i walks down to the start of a doomed run; k marks one past its end.
	for (usz i = size, usz k = size; k > 0; k = i)
	{
		// Find last index of item to be deleted.
		$if $invert:
			while (i > 0 && !filter(&self.entries[i - 1], ctx)) i--;
		$else
			while (i > 0 && filter(&self.entries[i - 1], ctx)) i--;
		$endif
		// Remove the items from this index up to the one not to be deleted.
		usz n = self.size - k;
		for (usz j = i; j < k; j++) self.free_element(self.entries[j]);
		// Close the gap by moving the n surviving elements after k down to i.
		self.entries[i:n] = self.entries[k:n];
		self.size -= k - i;
		// Find last index of item not to be deleted.
		$if $invert:
			while (i > 0 && filter(&self.entries[i - 1], ctx)) i--;
		$else
			while (i > 0 && !filter(&self.entries[i - 1], ctx)) i--;
		$endif
	}
	return size - self.size;
}
/**
 * Reserve at least min_capacity, rounded up to the next power of two.
 * No-op if the current capacity already suffices. Existing elements are
 * preserved by the realloc.
 **/
fn void GenericList.reserve(&self, usz min_capacity)
{
	if (!min_capacity) return;
	if (self.capacity >= min_capacity) return;
	// Lazily bind the heap allocator for lists that were never initialized.
	if (!self.allocator) self.allocator = allocator::heap();
	min_capacity = math::next_power_of_2(min_capacity);
	self.entries = allocator::realloc(self.allocator, self.entries, any*.sizeof * min_capacity);
	self.capacity = min_capacity;
}
/**
 * Subscript support: list[index] yields the stored element (no bounds
 * check here beyond what the caller provides).
 **/
macro any* GenericList.@item_at(&self, usz index) @operator([])
{
	return self.entries[index];
}
/**
 * Replace the element at `index` with a clone of `value`, freeing the old
 * element. index == self.size appends instead.
 *
 * @require index <= self.size "Index out of range"
 **/
macro void GenericList.set(&self, usz index, value)
{
	if (index == self.size)
	{
		self.push(value);
		return;
	}
	self.free_element(self.entries[index]);
	// NOTE(review): push() clones via allocator::clone while this uses
	// allocator::copy — confirm the two helpers are equivalent.
	self.entries[index] = allocator::copy(self.allocator, value);
}
/**
 * Grow the backing array, if needed, to fit `added` more elements.
 * Capacity doubles from the current value (starting at 16), then reserve()
 * performs the actual reallocation.
 **/
fn void GenericList.ensure_capacity(&self, usz added = 1) @inline @private
{
	usz new_size = self.size + added;
	if (self.capacity >= new_size) return;
	// Guard against overflow of the doubling loop below.
	assert(new_size < usz.max / 2U);
	usz new_capacity = self.capacity ? 2U * self.capacity : 16U;
	while (new_capacity < new_size) new_capacity *= 2U;
	self.reserve(new_capacity);
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of self source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::collections::linkedlist(<Type>);
@@ -32,7 +32,7 @@ fn void LinkedList.push_last(&self, Type value)
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
* @return "the initialized list"
**/
fn LinkedList* LinkedList.new_init(&self, Allocator* allocator = mem::heap())
fn LinkedList* LinkedList.new_init(&self, Allocator* allocator = allocator::heap())
{
*self = { .allocator = allocator };
return self;
@@ -42,14 +42,14 @@ fn LinkedList* LinkedList.new_init(&self, Allocator* allocator = mem::heap())
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
* @return "the initialized list"
**/
fn LinkedList* LinkedList.init_new(&self, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn LinkedList* LinkedList.init_new(&self, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(allocator);
}
fn LinkedList* LinkedList.temp_init(&self)
{
return self.new_init(mem::temp()) @inline;
return self.new_init(allocator::temp()) @inline;
}
fn LinkedList* LinkedList.init_temp(&self) @deprecated("Replaced by temp_init")
@@ -62,12 +62,13 @@ fn LinkedList* LinkedList.init_temp(&self) @deprecated("Replaced by temp_init")
**/
macro void LinkedList.free_node(&self, Node* node) @private
{
self.allocator.free(node);
allocator::free(self.allocator, node);
}
macro Node* LinkedList.alloc_node(&self) @private
{
if (!self.allocator) self.allocator = mem::heap();
return self.allocator.new(Node);
if (!self.allocator) self.allocator = allocator::heap();
return allocator::alloc(self.allocator, Node);
}
fn void LinkedList.link_first(&self, Type value) @private

View File

@@ -1,9 +1,8 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of self source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::collections::list(<Type>);
import std::io;
import std::math;
import std::io,std::math;
def ElementPredicate = fn bool(Type *type);
def ElementTest = fn bool(Type *type, any* context);
@@ -23,14 +22,14 @@ struct List (Printable)
* @param initial_capacity "The initial capacity to reserve"
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
**/
fn List* List.new_init(&self, usz initial_capacity = 16, Allocator* allocator = mem::heap())
fn List* List.new_init(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap())
{
self.allocator = allocator;
self.size = 0;
if (initial_capacity > 0)
{
initial_capacity = math::next_power_of_2(initial_capacity);
self.entries = allocator.alloc_aligned(Type.sizeof * initial_capacity, .alignment = Type[1].alignof)!!;
self.entries = allocator::malloc_aligned(allocator, Type.sizeof * initial_capacity, .alignment = Type[1].alignof)!!;
}
else
{
@@ -44,7 +43,7 @@ fn List* List.new_init(&self, usz initial_capacity = 16, Allocator* allocator =
* @param initial_capacity "The initial capacity to reserve"
* @param [&inout] allocator "The allocator to use, defaults to the heap allocator"
**/
fn List* List.init_new(&self, usz initial_capacity = 16, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn List* List.init_new(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(initial_capacity, allocator) @inline;
}
@@ -56,7 +55,7 @@ fn List* List.init_new(&self, usz initial_capacity = 16, Allocator* allocator =
**/
fn List* List.temp_init(&self, usz initial_capacity = 16)
{
return self.new_init(initial_capacity, mem::temp()) @inline;
return self.new_init(initial_capacity, allocator::temp()) @inline;
}
/**
@@ -72,7 +71,7 @@ fn List* List.init_temp(&self, usz initial_capacity = 16) @deprecated("Replaced
/**
* @require self.size == 0 "The List must be empty"
**/
fn void List.init_wrapping_array(&self, Type[] types, Allocator* allocator = mem::heap())
fn void List.init_wrapping_array(&self, Type[] types, Allocator* allocator = allocator::heap())
{
self.allocator = allocator;
self.size = types.len;
@@ -100,7 +99,7 @@ fn usz! List.to_format(&self, Formatter* formatter) @dynamic
}
}
fn String List.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String List.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return string::new_format("%s", *self, .allocator = allocator);
}
@@ -164,17 +163,17 @@ fn void List.add_all(&self, List* other_list)
}
fn Type[] List.to_new_array(&self, Allocator* allocator = mem::heap())
fn Type[] List.to_new_array(&self, Allocator* allocator = allocator::heap())
{
if (!self.size) return Type[] {};
Type[] result = allocator.new_array(Type, self.size);
Type[] result = allocator::alloc_array(allocator, Type, self.size);
result[..] = self.entries[:self.size];
return result;
}
fn Type[] List.to_tarray(&self)
{
return self.to_new_array(mem::temp());
return self.to_new_array(allocator::temp());
}
/**
@@ -264,6 +263,11 @@ fn bool List.is_empty(&self) @inline
return !self.size;
}
fn usz List.byte_size(&self) @inline
{
return Type.sizeof * self.size;
}
fn usz List.len(&self) @operator(len) @inline
{
return self.size;
@@ -277,7 +281,7 @@ fn Type List.get(&self, usz index) @inline
fn void List.free(&self)
{
if (!self.allocator) return;
self.allocator.free_aligned(self.entries);
allocator::free_aligned(self.allocator, self.entries);
self.capacity = 0;
self.size = 0;
self.entries = null;
@@ -373,9 +377,9 @@ fn void List.reserve(&self, usz min_capacity)
{
if (!min_capacity) return;
if (self.capacity >= min_capacity) return;
if (!self.allocator) self.allocator = mem::heap();
if (!self.allocator) self.allocator = allocator::heap();
min_capacity = math::next_power_of_2(min_capacity);
self.entries = self.allocator.realloc_aligned(self.entries, Type.sizeof * min_capacity, .alignment = Type[1].alignof) ?? null;
self.entries = allocator::realloc_aligned(self.allocator, self.entries, Type.sizeof * min_capacity, .alignment = Type[1].alignof) ?? null;
self.capacity = min_capacity;
}

View File

@@ -26,13 +26,13 @@ struct HashMap
* @require !self.allocator "Map was already initialized"
* @require capacity < MAXIMUM_CAPACITY "Capacity cannot exceed maximum"
**/
fn HashMap* HashMap.new_init(&self, uint capacity = DEFAULT_INITIAL_CAPACITY, float load_factor = DEFAULT_LOAD_FACTOR, Allocator* allocator = mem::heap())
fn HashMap* HashMap.new_init(&self, uint capacity = DEFAULT_INITIAL_CAPACITY, float load_factor = DEFAULT_LOAD_FACTOR, Allocator* allocator = allocator::heap())
{
capacity = math::next_power_of_2(capacity);
self.allocator = allocator;
self.load_factor = load_factor;
self.threshold = (uint)(capacity * load_factor);
self.table = allocator.new_zero_array(Entry*, capacity);
self.table = allocator::new_array(allocator, Entry*, capacity);
return self;
}
@@ -43,7 +43,7 @@ fn HashMap* HashMap.new_init(&self, uint capacity = DEFAULT_INITIAL_CAPACITY, fl
* @require !map.allocator "Map was already initialized"
* @require capacity < MAXIMUM_CAPACITY "Capacity cannot exceed maximum"
**/
fn HashMap* HashMap.init_new(&map, uint capacity = DEFAULT_INITIAL_CAPACITY, float load_factor = DEFAULT_LOAD_FACTOR, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn HashMap* HashMap.init_new(&map, uint capacity = DEFAULT_INITIAL_CAPACITY, float load_factor = DEFAULT_LOAD_FACTOR, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return map.new_init(capacity, load_factor, allocator);
}
@@ -56,7 +56,7 @@ fn HashMap* HashMap.init_new(&map, uint capacity = DEFAULT_INITIAL_CAPACITY, flo
**/
fn HashMap* HashMap.temp_init(&self, uint capacity = DEFAULT_INITIAL_CAPACITY, float load_factor = DEFAULT_LOAD_FACTOR)
{
return self.new_init(capacity, load_factor, mem::temp()) @inline;
return self.new_init(capacity, load_factor, allocator::temp()) @inline;
}
/**
@@ -85,7 +85,7 @@ fn bool HashMap.is_initialized(&map)
* @param [&inout] allocator "The allocator to use"
* @param [&in] other_map "The map to copy from."
**/
fn HashMap* HashMap.new_init_from_map(&self, HashMap* other_map, Allocator* allocator = mem::heap())
fn HashMap* HashMap.new_init_from_map(&self, HashMap* other_map, Allocator* allocator = allocator::heap())
{
self.new_init(other_map.table.len, other_map.load_factor, allocator);
self.put_all_for_create(other_map);
@@ -96,7 +96,7 @@ fn HashMap* HashMap.new_init_from_map(&self, HashMap* other_map, Allocator* allo
* @param [&inout] allocator "The allocator to use"
* @param [&in] other_map "The map to copy from."
**/
fn HashMap* HashMap.init_new_from_map(&self, HashMap* other_map, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init_from_map")
fn HashMap* HashMap.init_new_from_map(&self, HashMap* other_map, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init_from_map")
{
return self.new_init_from_map(other_map, allocator) @inline;
@@ -107,7 +107,7 @@ fn HashMap* HashMap.init_new_from_map(&self, HashMap* other_map, Allocator* allo
**/
fn HashMap* HashMap.temp_init_from_map(&map, HashMap* other_map)
{
return map.new_init_from_map(other_map, mem::temp()) @inline;
return map.new_init_from_map(other_map, allocator::temp()) @inline;
}
/**
@@ -153,6 +153,7 @@ fn Entry*! HashMap.get_entry(&map, Key key)
/**
* Get the value or update and
* @require $assignable(#expr, Value)
**/
macro Value HashMap.@get_or_set(&map, Key key, Value #expr)
{
@@ -239,14 +240,14 @@ fn void HashMap.free(&map)
fn Key[] HashMap.key_tlist(&map)
{
return map.key_new_list(mem::temp()) @inline;
return map.key_new_list(allocator::temp()) @inline;
}
fn Key[] HashMap.key_new_list(&map, Allocator* allocator = mem::heap())
fn Key[] HashMap.key_new_list(&map, Allocator* allocator = allocator::heap())
{
if (!map.count) return {};
Key[] list = allocator.new_array(Key, map.count);
Key[] list = allocator::alloc_array(allocator, Key, map.count);
usz index = 0;
foreach (Entry* entry : map.table)
{
@@ -283,13 +284,13 @@ macro HashMap.@each_entry(map; @body(entry))
fn Value[] HashMap.value_tlist(&map)
{
return map.value_new_list(mem::temp()) @inline;
return map.value_new_list(allocator::temp()) @inline;
}
fn Value[] HashMap.value_new_list(&map, Allocator* allocator = mem::heap())
fn Value[] HashMap.value_new_list(&map, Allocator* allocator = allocator::heap())
{
if (!map.count) return {};
Value[] list = allocator.new_array(Value, map.count);
Value[] list = allocator::alloc_array(allocator, Value, map.count);
usz index = 0;
foreach (Entry* entry : map.table)
{
@@ -320,11 +321,10 @@ fn bool HashMap.has_value(&map, Value v) @if(VALUE_IS_EQUATABLE)
fn void HashMap.add_entry(&map, uint hash, Key key, Value value, uint bucket_index) @private
{
Entry* entry = map.allocator.new(Entry);
$if COPY_KEYS:
key = key.copy(map.allocator);
$endif
*entry = { .hash = hash, .key = key, .value = value, .next = map.table[bucket_index] };
Entry* entry = allocator::new(map.allocator, Entry, { .hash = hash, .key = key, .value = value, .next = map.table[bucket_index] });
map.table[bucket_index] = entry;
if (map.count++ >= map.threshold)
{
@@ -341,7 +341,7 @@ fn void HashMap.resize(&map, uint new_capacity) @private
map.threshold = uint.max;
return;
}
Entry*[] new_table = map.allocator.new_zero_array(Entry*, new_capacity);
Entry*[] new_table = allocator::new_array(map.allocator, Entry*, new_capacity);
map.transfer(new_table);
map.table = new_table;
map.free_internal(old_table.ptr);
@@ -405,7 +405,7 @@ fn void HashMap.put_for_create(&map, Key key, Value value) @private
fn void HashMap.free_internal(&map, void* ptr) @inline @private
{
map.allocator.free(ptr);
allocator::free(map.allocator, ptr);
}
fn bool HashMap.remove_entry_for_key(&map, Key key) @private
@@ -440,11 +440,10 @@ fn bool HashMap.remove_entry_for_key(&map, Key key) @private
fn void HashMap.create_entry(&map, uint hash, Key key, Value value, int bucket_index) @private
{
Entry *e = map.table[bucket_index];
Entry* entry = map.allocator.new(Entry);
$if COPY_KEYS:
key = key.copy(map.allocator);
$endif
*entry = { .hash = hash, .key = key, .value = value, .next = map.table[bucket_index] };
Entry* entry = allocator::new(map.allocator, Entry, { .hash = hash, .key = key, .value = value, .next = map.table[bucket_index] });
map.table[bucket_index] = entry;
map.count++;
}
@@ -452,7 +451,7 @@ fn void HashMap.create_entry(&map, uint hash, Key key, Value value, int bucket_i
fn void HashMap.free_entry(&self, Entry *entry) @local
{
$if COPY_KEYS:
self.allocator.free(entry.key);
allocator::free(self.allocator, entry.key);
$endif
self.free_internal(entry);
}

View File

@@ -2,9 +2,7 @@
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::collections::object;
import std::collections::map;
import std::collections::list;
import std::io;
import std::collections::map, std::collections::list, std::io;
const Object TRUE_OBJECT = { .b = true, .type = bool.typeid };
const Object FALSE_OBJECT = { .b = false, .type = bool.typeid };
@@ -80,9 +78,7 @@ fn usz! Object.to_format(&self, Formatter* formatter) @dynamic
fn Object* new_obj(Allocator* allocator)
{
Object* o = allocator.new(Object);
*o = { .allocator = allocator, .type = void.typeid };
return o;
return allocator::new(allocator, Object, { .allocator = allocator, .type = void.typeid });
}
fn Object* new_null()
@@ -92,30 +88,22 @@ fn Object* new_null()
fn Object* new_int(int128 i, Allocator* allocator)
{
Object* o = allocator.new(Object);
*o = { .i = i, .allocator = allocator, .type = int128.typeid };
return o;
return allocator::new(allocator, Object, { .i = i, .allocator = allocator, .type = int128.typeid });
}
macro Object* new_enum(e, Allocator* allocator)
{
Object* o = allocator.new(Object);
*o = { .i = (int128)e, .allocator = allocator, .type = @typeid(e) };
return o;
return allocator::new(allocator, Object, { .i = (int128)e, .allocator = allocator, .type = @typeid(e) });
}
fn Object* new_float(double f, Allocator* allocator)
{
Object* o = allocator.new(Object);
*o = { .f = f, .allocator = allocator, .type = double.typeid };
return o;
return allocator::new(allocator, Object, { .f = f, .allocator = allocator, .type = double.typeid });
}
fn Object* new_string(String s, Allocator* allocator)
{
Object* o = allocator.new(Object);
*o = { .s = s.copy(allocator), .allocator = allocator, .type = String.typeid };
return o;
return allocator::new(allocator, Object, { .s = s.copy(allocator), .allocator = allocator, .type = String.typeid });
}
@@ -131,7 +119,7 @@ fn void Object.free(&self)
case void:
break;
case String:
self.allocator.free(self.s);
allocator::free(self.allocator, self.s);
case ObjectInternalList:
foreach (ol : self.array)
{
@@ -140,13 +128,13 @@ fn void Object.free(&self)
self.array.free();
case ObjectInternalMap:
self.map.@each_entry(; ObjectInternalMapEntry* entry) {
self.allocator.free(entry.key);
allocator::free(self.allocator, entry.key);
entry.value.free();
};
default:
break;
}
if (self.allocator) self.allocator.free(self);
if (self.allocator) allocator::free(self.allocator, self);
}
fn bool Object.is_null(&self) @inline => self == &NULL_OBJECT;
@@ -193,7 +181,7 @@ fn void Object.set_object(&self, String key, Object* new_object) @private
ObjectInternalMapEntry*! entry = self.map.get_entry(key);
defer
{
(void)self.allocator.free(entry.key);
(void)allocator::free(self.allocator, entry.key);
(void)entry.value.free();
}
self.map.set(key.copy(self.map.allocator), new_object);

View File

@@ -27,7 +27,7 @@ distinct PriorityQueue = inline PrivatePriorityQueue(<Type, false>);
distinct PriorityQueueMax = inline PrivatePriorityQueue(<Type, true>);
module std::collections::priorityqueue::private(<Type, MAX>);
import std::collections::list;
import std::collections::list, std::io;
def Heap = List(<Type>);
@@ -36,19 +36,19 @@ struct PrivatePriorityQueue (Printable)
Heap heap;
}
fn void PrivatePriorityQueue.init_new(&self, usz initial_capacity = 16, Allocator* allocator = mem::heap()) @inline @deprecated("Replaced by new_init")
fn void PrivatePriorityQueue.init_new(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap()) @inline @deprecated("Replaced by new_init")
{
return self.new_init(initial_capacity, allocator);
}
fn void PrivatePriorityQueue.new_init(&self, usz initial_capacity = 16, Allocator* allocator = mem::heap()) @inline
fn void PrivatePriorityQueue.new_init(&self, usz initial_capacity = 16, Allocator* allocator = allocator::heap()) @inline
{
self.heap.new_init(initial_capacity, allocator);
}
fn void PrivatePriorityQueue.temp_init(&self, usz initial_capacity = 16) @inline
{
self.heap.new_init(initial_capacity, mem::temp()) @inline;
self.heap.new_init(initial_capacity, allocator::temp()) @inline;
}
fn void PrivatePriorityQueue.init_temp(&self, usz initial_capacity = 16) @inline @deprecated("Replaced by temp_init")
@@ -151,7 +151,7 @@ fn usz! PrivatePriorityQueue.to_format(&self, Formatter* formatter) @dynamic
return self.heap.to_format(formatter);
}
fn String PrivatePriorityQueue.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String PrivatePriorityQueue.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return self.heap.to_new_string(allocator);
}

View File

@@ -2,6 +2,7 @@
* @require Type.is_ordered : "The type must be ordered"
**/
module std::collections::range(<Type>);
import std::io;
struct Range (Printable)
{
@@ -28,14 +29,14 @@ fn Type Range.get(&self, usz index) @operator([])
return (Type)(self.start + (usz)index);
}
fn String Range.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String Range.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return string::new_format("[%s..%s]", self.start, self.end, .allocator = allocator);
}
fn String Range.to_tstring(&self)
{
return self.to_new_string(mem::temp());
return self.to_new_string(allocator::temp());
}
fn usz! Range.to_format(&self, Formatter* formatter) @dynamic
@@ -65,14 +66,14 @@ fn usz! ExclusiveRange.to_format(&self, Formatter* formatter) @dynamic
return formatter.printf("[%s..<%s]", self.start, self.end)!;
}
fn String ExclusiveRange.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String ExclusiveRange.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return string::new_format("[%s..<%s]", self.start, self.end, .allocator = allocator);
}
fn String ExclusiveRange.to_tstring(&self)
{
return self.to_new_string(mem::temp());
return self.to_new_string(allocator::temp());
}
/**

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::mem::allocator;
import std::math;
struct ArenaAllocator (Allocator)
{

View File

@@ -1,7 +1,8 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::mem::allocator;
import std::math;
struct DynamicArenaAllocator (Allocator)
{
@@ -29,14 +30,14 @@ fn void DynamicArenaAllocator.free(&self)
while (page)
{
DynamicArenaPage* next_page = page.prev_arena;
self.backing_allocator.free(page);
allocator::free(self.backing_allocator, page);
page = next_page;
}
page = self.unused_page;
while (page)
{
DynamicArenaPage* next_page = page.prev_arena;
self.backing_allocator.free(page);
allocator::free(self.backing_allocator, page);
page = next_page;
}
self.page = null;
@@ -140,11 +141,11 @@ fn void*! DynamicArenaAllocator._alloc_new(&self, usz size, usz alignment, usz o
usz page_size = max(self.page_size, mem::aligned_offset(size + DynamicArenaChunk.sizeof + offset, alignment) - offset);
// Grab the page without alignment (we do it ourselves)
void* mem = self.backing_allocator.alloc_checked(page_size)!;
DynamicArenaPage*! page = self.backing_allocator.new(DynamicArenaPage);
void* mem = allocator::malloc_try(self.backing_allocator, page_size)!;
DynamicArenaPage*! page = allocator::new_try(self.backing_allocator, DynamicArenaPage);
if (catch err = page)
{
self.backing_allocator.free(mem);
allocator::free(self.backing_allocator, mem);
return err?;
}
page.memory = mem;

View File

@@ -1,9 +1,9 @@
// Copyright (c) 2021-2023 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::mem::allocator;
import std::math;
struct SimpleHeapAllocator (Allocator)
{

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.

View File

@@ -34,15 +34,15 @@ fn void OnStackAllocator.free(&self)
{
if (chunk.is_aligned)
{
self.backing_allocator.free_aligned(chunk.data);
allocator::free_aligned(self.backing_allocator, chunk.data);
}
else
{
self.backing_allocator.free(chunk.data);
allocator::free(self.backing_allocator, chunk.data);
}
void* old = chunk;
chunk = chunk.prev;
self.backing_allocator.free(old);
allocator::free(self.backing_allocator, old);
}
self.chunk = null;
self.used = 0;
@@ -76,7 +76,7 @@ fn void on_stack_allocator_remove_chunk(OnStackAllocator* a, void* ptr) @local
if (chunk.data == ptr)
{
*addr = chunk.prev;
a.backing_allocator.free(chunk);
allocator::free(a.backing_allocator, chunk);
return;
}
addr = &chunk.prev;
@@ -140,8 +140,8 @@ fn void*! OnStackAllocator.acquire(&self, usz size, bool clear, usz alignment, u
if (end > total_len)
{
OnStackAllocatorExtraChunk* chunk = backing_allocator.alloc_checked(OnStackAllocatorExtraChunk.sizeof)!;
defer catch backing_allocator.free(chunk);
OnStackAllocatorExtraChunk* chunk = allocator::alloc_try(backing_allocator, OnStackAllocatorExtraChunk)!;
defer catch allocator::free(backing_allocator, chunk);
defer try self.chunk = chunk;
*chunk = { .prev = self.chunk, .is_aligned = aligned };
return chunk.data = backing_allocator.acquire(size, clear, aligned ? alignment : 0, offset)!;

View File

@@ -1,5 +1,5 @@
module std::core::mem::allocator;
import std::io;
import std::io, std::math;
struct TempAllocatorChunk @local
{
@@ -35,9 +35,9 @@ macro bool TempAllocatorPage.is_aligned(&self) => self.size & PAGE_IS_ALIGNED ==
/**
* @require size >= 16
**/
fn TempAllocator*! new_temp(usz size, Allocator* allocator)
fn TempAllocator*! new_temp_allocator(usz size, Allocator* allocator)
{
TempAllocator* temp = allocator.alloc_checked(TempAllocator.sizeof + size)!;
TempAllocator* temp = allocator::alloc_with_padding(allocator, TempAllocator, size)!;
temp.last_page = null;
temp.backing_allocator = allocator;
temp.used = 0;
@@ -45,6 +45,11 @@ fn TempAllocator*! new_temp(usz size, Allocator* allocator)
return temp;
}
fn TempAllocator*! new_temp(usz size, Allocator* allocator) @deprecated("Use new_temp_allocator")
{
return new_temp_allocator(size, allocator);
}
fn usz TempAllocator.mark(&self) @dynamic => self.used;
fn void TempAllocator.release(&self, void* old_pointer, bool) @dynamic
@@ -71,8 +76,7 @@ fn void TempAllocator.reset(&self, usz mark) @dynamic
fn void! TempAllocator._free_page(&self, TempAllocatorPage* page) @inline @local
{
void* mem = page.start;
if (page.is_aligned()) return self.backing_allocator.free_aligned(mem);
return self.backing_allocator.free(mem);
return self.backing_allocator.release(mem, page.is_aligned());
}
fn void*! TempAllocator._realloc_page(&self, TempAllocatorPage* page, usz size, usz alignment, usz offset) @inline @local
@@ -92,14 +96,7 @@ fn void*! TempAllocator._realloc_page(&self, TempAllocatorPage* page, usz size,
// Clear on size > original size.
void* data = self.acquire(size, size > page_size, alignment, offset)!;
mem::copy(data, &page.data[0], page_size, mem::DEFAULT_MEM_ALIGNMENT, mem::DEFAULT_MEM_ALIGNMENT);
if (page.is_aligned())
{
self.backing_allocator.free_aligned(real_pointer);
}
else
{
self.backing_allocator.free(real_pointer);
}
self.backing_allocator.release(real_pointer, page.is_aligned());
return data;
}
@@ -168,11 +165,11 @@ fn void*! TempAllocator.acquire(&self, usz size, bool clear, usz alignment, usz
usz total_alloc_size = TempAllocatorPage.sizeof + size;
if (clear)
{
page = self.backing_allocator.calloc_aligned(total_alloc_size, alignment, TempAllocatorPage.sizeof + offset)!;
page = allocator::calloc_aligned(self.backing_allocator, total_alloc_size, alignment, TempAllocatorPage.sizeof + offset)!;
}
else
{
page = self.backing_allocator.alloc_aligned(total_alloc_size, alignment, TempAllocatorPage.sizeof + offset)!;
page = allocator::malloc_aligned(self.backing_allocator, total_alloc_size, alignment, TempAllocatorPage.sizeof + offset)!;
}
page.start = page;
page.size = size | PAGE_IS_ALIGNED;

View File

@@ -1,10 +1,9 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::mem::allocator;
import std::collections::map;
import std::collections::list;
import std::collections, std::io, std::os::backtrace;
const MAX_BACKTRACE = 16;
struct Allocation
@@ -168,7 +167,7 @@ fn void! TrackingAllocator.fprint_report(&self, OutStream* out)
Backtrace trace = backtrace::BACKTRACE_UNKNOWN;
if (allocation.backtrace[3])
{
trace = backtrace::symbolize_backtrace(allocation.backtrace[3:1], mem::temp()).get(0) ?? backtrace::BACKTRACE_UNKNOWN;
trace = backtrace::symbolize_backtrace(allocation.backtrace[3:1], allocator::temp()).get(0) ?? backtrace::BACKTRACE_UNKNOWN;
}
if (trace.function.len) leaks = true;
io::fprintfn(out, "%13s %p %s:%d", allocation.size,
@@ -207,7 +206,7 @@ fn void! TrackingAllocator.fprint_report(&self, OutStream* out)
break;
}
}
BacktraceList list = backtrace::symbolize_backtrace(allocation.backtrace[3..(end - 1)], mem::temp())!;
BacktraceList list = backtrace::symbolize_backtrace(allocation.backtrace[3..(end - 1)], allocator::temp())!;
io::fprintfn(out, "Allocation %d (%d bytes): ", i + 1, allocation.size)!;
foreach (trace : list)
{

View File

@@ -55,10 +55,10 @@ macro rindex_of(array, element)
* @require @typeis(arr1[0], $typeof(arr2[0])) "Arrays must have the same type"
* @ensure result.len == arr1.len + arr2.len
**/
macro concat_new(arr1, arr2, Allocator* allocator = mem::heap())
macro concat_new(arr1, arr2, Allocator* allocator = allocator::heap())
{
var $Type = $typeof(arr1[0]);
$Type[] result = allocator.new_array($Type, arr1.len + arr2.len);
$Type[] result = allocator::alloc_array(allocator, $Type, arr1.len + arr2.len);
if (arr1.len > 0)
{
mem::copy(result.ptr, &arr1[0], arr1.len * $Type.sizeof, $Type.alignof, $Type.alignof);
@@ -81,7 +81,7 @@ macro concat_new(arr1, arr2, Allocator* allocator = mem::heap())
* @require @typeis(arr1[0], $typeof(arr2[0])) "Arrays must have the same type"
* @ensure result.len == arr1.len + arr2.len
**/
macro tconcat(arr1, arr2) => concat(arr1, arr2, mem::temp());
macro tconcat(arr1, arr2) => concat(arr1, arr2, allocator::temp());
module std::core::array::slice(<Type>);

View File

@@ -1,9 +1,8 @@
// Copyright (c) 2021-2022 Christoffer Lerno and contributors. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno and contributors. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::builtin;
import libc;
import std::hash;
import libc, std::hash, std::io, std::os::backtrace;
/**
* Use `IteratorResult` when reading the end of an iterator, or accessing a result out of bounds.
@@ -75,7 +74,7 @@ fn bool print_backtrace(String message, int backtraces_to_ignore) @if(env::NATIV
void*[256] buffer;
void*[] backtraces = backtrace::capture_current(&buffer);
backtraces_to_ignore++;
BacktraceList! backtrace = backtrace::symbolize_backtrace(backtraces, mem::temp());
BacktraceList! backtrace = backtrace::symbolize_backtrace(backtraces, allocator::temp());
if (catch backtrace) return false;
if (backtrace.len() <= backtraces_to_ignore) return false;
io::eprint("\nERROR: '");

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021-2022 Christoffer Lerno and contributors. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno and contributors. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::builtin;

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::cinterop;

View File

@@ -8,10 +8,10 @@ const usz MIN_CAPACITY @private = 16;
/**
* @require !self.data() "String already initialized"
**/
fn DString DString.new_init(&self, usz capacity = MIN_CAPACITY, Allocator* allocator = mem::heap())
fn DString DString.new_init(&self, usz capacity = MIN_CAPACITY, Allocator* allocator = allocator::heap())
{
if (capacity < MIN_CAPACITY) capacity = MIN_CAPACITY;
StringData* data = allocator.new(StringData, .end_padding = capacity);
StringData* data = allocator::alloc_with_padding(allocator, StringData, capacity)!!;
data.allocator = allocator;
data.len = 0;
data.capacity = capacity;
@@ -21,7 +21,7 @@ fn DString DString.new_init(&self, usz capacity = MIN_CAPACITY, Allocator* alloc
/**
* @require !self.data() "String already initialized"
**/
fn DString DString.init_new(&self, usz capacity = MIN_CAPACITY, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn DString DString.init_new(&self, usz capacity = MIN_CAPACITY, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(capacity, allocator) @inline;
}
@@ -31,7 +31,7 @@ fn DString DString.init_new(&self, usz capacity = MIN_CAPACITY, Allocator* alloc
**/
fn DString DString.temp_init(&self, usz capacity = MIN_CAPACITY)
{
self.new_init(capacity, mem::temp()) @inline;
self.new_init(capacity, allocator::temp()) @inline;
return *self;
}
@@ -43,14 +43,14 @@ fn DString DString.init_temp(&self, usz capacity = MIN_CAPACITY) @deprecated("Re
return self.temp_init(capacity) @inline;
}
fn DString new_with_capacity(usz capacity, Allocator* allocator = mem::heap())
fn DString new_with_capacity(usz capacity, Allocator* allocator = allocator::heap())
{
return DString{}.new_init(capacity, allocator);
}
fn DString temp_with_capacity(usz capacity) => new_with_capacity(capacity, mem::temp()) @inline;
fn DString temp_with_capacity(usz capacity) => new_with_capacity(capacity, allocator::temp()) @inline;
fn DString new(String c = "", Allocator* allocator = mem::heap())
fn DString new(String c = "", Allocator* allocator = allocator::heap())
{
usz len = c.len;
StringData* data = (StringData*)new_with_capacity(len, allocator);
@@ -62,9 +62,9 @@ fn DString new(String c = "", Allocator* allocator = mem::heap())
return (DString)data;
}
fn DString temp_new(String s = "") => new(s, mem::temp()) @inline;
fn DString temp_new(String s = "") => new(s, allocator::temp()) @inline;
fn DString DString.new_concat(self, DString b, Allocator* allocator = mem::heap())
fn DString DString.new_concat(self, DString b, Allocator* allocator = allocator::heap())
{
DString string;
string.new_init(self.len() + b.len(), allocator);
@@ -73,9 +73,9 @@ fn DString DString.new_concat(self, DString b, Allocator* allocator = mem::heap(
return string;
}
fn DString DString.temp_concat(self, DString b) => self.new_concat(b, mem::temp());
fn DString DString.temp_concat(self, DString b) => self.new_concat(b, allocator::temp());
fn DString DString.new_tconcat(self, DString b) @deprecated("Replaced by temp_concat") => self.new_concat(b, mem::temp());
fn DString DString.new_tconcat(self, DString b) @deprecated("Replaced by temp_concat") => self.new_concat(b, allocator::temp());
fn ZString DString.zstr_view(&self)
{
@@ -164,7 +164,7 @@ fn void DString.append_char32(&self, Char32 c)
data.len += n;
}
fn DString DString.tcopy(&self) => self.copy(mem::temp());
fn DString DString.tcopy(&self) => self.copy(allocator::temp());
fn DString DString.copy(self, Allocator* allocator = null)
{
@@ -174,32 +174,32 @@ fn DString DString.copy(self, Allocator* allocator = null)
return (DString)null;
}
StringData* data = self.data();
if (!allocator) allocator = mem::heap();
if (!allocator) allocator = allocator::heap();
DString new_string = new_with_capacity(data.capacity, allocator);
mem::copy((char*)new_string.data(), (char*)data, StringData.sizeof + data.len);
return new_string;
}
fn ZString DString.copy_zstr(self, Allocator* allocator = mem::heap())
fn ZString DString.copy_zstr(self, Allocator* allocator = allocator::heap())
{
usz str_len = self.len();
if (!str_len)
{
return (ZString)allocator.calloc(1);
return (ZString)allocator::calloc(allocator, 1);
}
char* zstr = allocator.alloc(str_len + 1);
char* zstr = allocator::malloc(allocator, str_len + 1);
StringData* data = self.data();
mem::copy(zstr, &data.chars, str_len);
zstr[str_len] = 0;
return (ZString)zstr;
}
fn String DString.copy_str(self, Allocator* allocator = mem::heap())
fn String DString.copy_str(self, Allocator* allocator = allocator::heap())
{
return (String)self.copy_zstr(allocator)[:self.len()];
}
fn String DString.tcopy_str(self) => self.copy_str(mem::temp()) @inline;
fn String DString.tcopy_str(self) => self.copy_str(allocator::temp()) @inline;
fn bool DString.equals(self, DString other_string)
{
@@ -222,7 +222,7 @@ fn void DString.free(&self)
if (!*self) return;
StringData* data = self.data();
if (!data) return;
data.allocator.free(data);
allocator::free(data.allocator, data);
*self = (DString)null;
}
@@ -258,7 +258,7 @@ fn void DString.append_chars(&self, String str)
data.len += other_len;
}
fn Char32[] DString.copy_utf32(&self, Allocator* allocator = mem::heap())
fn Char32[] DString.copy_utf32(&self, Allocator* allocator = allocator::heap())
{
return self.str_view().to_new_utf32(allocator) @inline!!;
}
@@ -403,7 +403,7 @@ fn usz! DString.appendfn(&self, String format, args...) @maydiscard
return len + 1;
}
fn DString new_join(String[] s, String joiner, Allocator* allocator = mem::heap())
fn DString new_join(String[] s, String joiner, Allocator* allocator = allocator::heap())
{
if (!s.len) return (DString)null;
usz total_size = joiner.len * s.len;
@@ -447,7 +447,7 @@ fn void DString.reserve(&self, usz addition)
if (new_capacity < MIN_CAPACITY) new_capacity = MIN_CAPACITY;
while (new_capacity < len) new_capacity *= 2;
data.capacity = new_capacity;
*self = (DString)data.allocator.realloc(data, StringData.sizeof + new_capacity);
*self = (DString)allocator::realloc(data.allocator, data, StringData.sizeof + new_capacity);
}
fn usz! DString.read_from_stream(&self, InStream* reader)

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2021 Christoffer Lerno. All rights reserved.
// Copyright (c) 2021-2024 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::env;
@@ -131,6 +131,7 @@ const bool TESTING = $$TESTING;
const MemoryEnvironment MEMORY_ENV = (MemoryEnvironment)$$MEMORY_ENVIRONMENT;
const bool TRACK_MEMORY = DEBUG_SYMBOLS && (COMPILER_SAFE_MODE || TESTING);
const bool X86_64 = ARCH_TYPE == X86_64;
const bool X86 = ARCH_TYPE == X86;
const bool AARCH64 = ARCH_TYPE == AARCH64;
const bool NATIVE_STACKTRACE = LINUX || DARWIN || WIN32;
const bool LINUX = LIBC && OS_TYPE == LINUX;

View File

@@ -2,6 +2,8 @@
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::mem;
import std::core::mem::allocator @public;
import std::math;
const MAX_MEMORY_ALIGNMENT = 0x1000_0000;
const DEFAULT_MEM_ALIGNMENT = (void*.alignof) * 2;
@@ -379,9 +381,9 @@ macro type_alloc_must_be_aligned($Type)
**/
macro void @scoped(Allocator* allocator; @body())
{
Allocator* old_allocator = thread_allocator;
thread_allocator = allocator;
defer thread_allocator = old_allocator;
Allocator* old_allocator = allocator::thread_allocator;
allocator::thread_allocator = allocator;
defer allocator::thread_allocator = old_allocator;
@body();
}
@@ -389,11 +391,11 @@ macro void @report_heap_allocs_in_scope(;@body())
{
TrackingAllocator tracker;
tracker.init(thread_allocator);
Allocator* old_allocator = thread_allocator;
thread_allocator = &tracker;
Allocator* old_allocator = allocator::thread_allocator;
allocator::thread_allocator = &tracker;
defer
{
thread_allocator = old_allocator;
allocator::thread_allocator = old_allocator;
tracker.print_report();
tracker.free();
}
@@ -404,7 +406,7 @@ macro void @stack_mem(usz $size; @body(Allocator* mem)) @builtin
{
char[$size] buffer;
OnStackAllocator allocator;
allocator.init(&buffer, mem::heap());
allocator.init(&buffer, allocator::heap());
defer allocator.free();
@body(&allocator);
}
@@ -413,7 +415,7 @@ macro void @stack_pool(usz $size; @body) @builtin
{
char[$size] buffer;
OnStackAllocator allocator;
allocator.init(&buffer, mem::heap());
allocator.init(&buffer, allocator::heap());
defer allocator.free();
mem::@scoped(&allocator)
{
@@ -421,76 +423,67 @@ macro void @stack_pool(usz $size; @body) @builtin
};
}
struct TempState
{
TempAllocator* old;
TempAllocator* current;
usz mark;
}
/**
* Push the current temp allocator. A push must always be balanced with a pop using the current state.
**/
fn TempState temp_push(TempAllocator* other = null)
{
TempAllocator* current = allocator::temp();
TempAllocator* old = current;
if (other == current)
{
current = allocator::temp_allocator_next();
}
return { old, current, current.used };
}
/**
* Pop the current temp allocator. A pop must always be balanced with a push.
**/
fn void temp_pop(TempState old_state)
{
assert(allocator::thread_temp_allocator == old_state.current, "Tried to pop temp allocators out of order.");
assert(old_state.current.used >= old_state.mark, "Tried to pop temp allocators out of order.");
old_state.current.reset(old_state.mark);
allocator::thread_temp_allocator = old_state.old;
}
macro void @pool(TempAllocator* #other_temp = null; @body) @builtin
{
TempAllocator* current = temp();
TempAllocator* current = allocator::temp();
var $has_arg = !$is_const(#other_temp);
$if $has_arg:
TempAllocator* original = current;
if (current == (void*)#other_temp) current = temp_allocator_next();
if (current == (void*)#other_temp) current = allocator::temp_allocator_next();
$endif
usz mark = current.used;
defer
{
current.reset(mark);
$if $has_arg:
thread_temp_allocator = original;
allocator::thread_temp_allocator = original;
$endif;
}
@body();
}
tlocal Allocator* thread_allocator @private = &allocator::LIBC_ALLOCATOR;
tlocal TempAllocator* thread_temp_allocator @private = null;
tlocal TempAllocator*[2] temp_allocator_pair @private;
Allocator* temp_base_allocator @private = &allocator::LIBC_ALLOCATOR;
macro TempAllocator* create_default_sized_temp_allocator(Allocator* allocator) @local
{
$switch (env::MEMORY_ENV)
$case NORMAL:
return allocator::new_temp(1024 * 256, allocator)!!;
$case SMALL:
return allocator::new_temp(1024 * 16, allocator)!!;
$case TINY:
return allocator::new_temp(1024 * 2, allocator)!!;
$case NONE:
unreachable("Temp allocator must explicitly created when memory-env is set to 'none'.");
$endswitch
}
fn TempAllocator *temp_allocator_next() @private
{
if (!thread_temp_allocator)
{
init_default_temp_allocators();
return thread_temp_allocator;
}
usz index = thread_temp_allocator == temp_allocator_pair[0] ? 1 : 0;
return thread_temp_allocator = temp_allocator_pair[index];
}
import libc;
fn void init_default_temp_allocators() @private
{
temp_allocator_pair[0] = create_default_sized_temp_allocator(temp_base_allocator);
temp_allocator_pair[1] = create_default_sized_temp_allocator(temp_base_allocator);
thread_temp_allocator = temp_allocator_pair[0];
}
macro TempAllocator* temp()
{
if (!thread_temp_allocator)
{
init_default_temp_allocators();
}
return thread_temp_allocator;
}
macro Allocator* current_allocator() => thread_allocator;
macro Allocator* heap() => thread_allocator;
macro TempAllocator* temp() @deprecated("Use allocator::temp()") => allocator::temp();
macro Allocator* current_allocator() @deprecated("Use allocator::heap()") => allocator::heap();
macro Allocator* heap() @deprecated("Use allocator::heap()") => allocator::heap();
module std::core::mem @if(WASM_NOLIBC);
@@ -520,88 +513,143 @@ macro TrackingEnv* get_tracking_env()
$endif
}
macro @clone(value) @builtin
macro @clone(value) @builtin @nodiscard
{
return mem::heap().clone(value);
return allocator::clone(allocator::heap(), value);
}
macro @tclone(value) @builtin
macro @tclone(value) @builtin @nodiscard
{
return mem::temp().clone(value);
return temp_new($typeof(value), value);
}
fn void* malloc(usz size) @builtin @inline
fn void* malloc(usz size) @builtin @inline @nodiscard
{
return mem::heap().alloc(size);
return allocator::malloc(allocator::heap(), size);
}
fn void* tmalloc(usz size, usz alignment = 0, usz offset = 0) @builtin @inline
fn void* tmalloc(usz size, usz alignment = 0, usz offset = 0) @builtin @inline @nodiscard
{
return temp().acquire(size, false, alignment, offset)!!;
return allocator::temp().acquire(size, false, alignment, offset)!!;
}
macro new($Type)
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
**/
macro new($Type, ...) @nodiscard
{
return heap().new($Type);
$if $vacount == 0:
return ($Type*)calloc($Type.sizeof);
$else
$Type* val = malloc($Type.sizeof);
*val = $vaexpr(0);
return val;
$endif
}
macro new_clear($Type)
macro alloc($Type) @nodiscard
{
return heap().new_clear($Type);
return ($Type*)malloc($Type.sizeof);
}
macro new_temp($Type)
macro new_clear($Type) @deprecated("Use mem::new")
{
return new($Type);
}
macro new_temp($Type) @deprecated("Use mem::temp_alloc or mem::temp_new")
{
return tmalloc($Type.sizeof);
}
macro new_temp_clear($Type)
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
**/
macro temp_new($Type, ...) @nodiscard
{
$if $vacount == 0:
return ($Type*)tcalloc($Type.sizeof) @inline;
$else
$Type* val = tmalloc($Type.sizeof) @inline;
*val = $vaexpr(0);
return val;
$endif
}
macro temp_alloc($Type) @nodiscard
{
return tmalloc($Type.sizeof);
}
macro new_temp_clear($Type) @deprecated("use mem::temp_new")
{
return tcalloc($Type.sizeof);
}
macro new_array($Type, usz elements)
macro new_array($Type, usz elements) @nodiscard
{
return heap().new_array($Type, elements);
return allocator::new_array(allocator::heap(), $Type, elements);
}
macro temp_array($Type, usz elements)
macro alloc_array($Type, usz elements) @nodiscard
{
return allocator::alloc_array(allocator::heap(), $Type, elements);
}
macro talloc_array($Type, usz elements) @nodiscard @deprecated("use mem::temp_alloc_array")
{
return temp_alloc_array($Type, elements);
}
macro temp_alloc_array($Type, usz elements) @nodiscard
{
return (($Type*)tmalloc($Type.sizeof * elements, $Type.alignof))[:elements];
}
macro new_zero_array($Type, usz elements)
macro temp_array($Type, usz elements) @nodiscard @deprecated("use mem::temp_alloc_array")
{
return heap().new_zero_array($Type, elements);
return temp_alloc_array($Type, elements);
}
macro temp_zero_array($Type, usz elements)
macro temp_new_array($Type, usz elements) @nodiscard
{
return (($Type*)tcalloc($Type.sizeof * elements, $Type.alignof))[:elements];
}
fn void* calloc(usz size) @builtin @inline
macro new_zero_array($Type, usz elements) @deprecated("Use new_array")
{
return heap().calloc(size);
return new_array($Type, elements);
}
fn void* tcalloc(usz size, usz alignment = 0, usz offset = 0) @builtin @inline
macro temp_zero_array($Type, usz elements) @deprecated("Use temp_new_array")
{
return temp().acquire(size, false, alignment, offset)!!;
return temp_new_array($Type, elements);
}
fn void* realloc(void *ptr, usz new_size) @builtin @inline
fn void* calloc(usz size) @builtin @inline @nodiscard
{
return heap().realloc(ptr, new_size);
return allocator::calloc(allocator::heap(), size);
}
fn void* tcalloc(usz size, usz alignment = 0, usz offset = 0) @builtin @inline @nodiscard
{
return allocator::temp().acquire(size, false, alignment, offset)!!;
}
fn void* realloc(void *ptr, usz new_size) @builtin @inline @nodiscard
{
return allocator::realloc(allocator::heap(), ptr, new_size);
}
fn void free(void* ptr) @builtin @inline
{
heap().free(ptr);
return allocator::free(allocator::heap(), ptr);
}
fn void* trealloc(void* ptr, usz size, usz alignment = mem::DEFAULT_MEM_ALIGNMENT) @builtin @inline
fn void* trealloc(void* ptr, usz size, usz alignment = mem::DEFAULT_MEM_ALIGNMENT) @builtin @inline @nodiscard
{
return temp().resize(ptr, size, alignment, 0)!!;
return allocator::temp().resize(ptr, size, alignment, 0)!!;
}

View File

@@ -20,12 +20,292 @@ interface Allocator
fn void release(void* ptr, bool aligned);
}
struct AlignedBlock
def MemoryAllocFn = fn char[]!(usz);
fault AllocationFailure
{
usz len;
void* start;
OUT_OF_MEMORY,
CHUNK_TOO_LARGE,
}
fn usz alignment_for_allocation(usz alignment) @inline @private
{
return alignment < mem::DEFAULT_MEM_ALIGNMENT ? alignment = mem::DEFAULT_MEM_ALIGNMENT : alignment;
}
macro void* malloc(Allocator* allocator, usz size) @nodiscard
{
return malloc_try(allocator, size)!!;
}
macro void*! malloc_try(Allocator* allocator, usz size) @nodiscard
{
$if env::TESTING:
char* data = allocator.acquire(size, false, 0, 0)!;
mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
return data;
$else
return allocator.acquire(size, false, 0, 0);
$endif
}
macro void* calloc(Allocator* allocator, usz size) @nodiscard
{
return calloc_try(allocator, size)!!;
}
macro void*! calloc_try(Allocator* allocator, usz size) @nodiscard
{
return allocator.acquire(size, true, 0, 0);
}
macro void* realloc(Allocator* allocator, void* ptr, usz new_size) @nodiscard
{
return realloc_try(allocator, ptr, new_size)!!;
}
macro void*! realloc_try(Allocator* allocator, void* ptr, usz new_size) @nodiscard
{
return allocator.resize(ptr, new_size, 0, 0);
}
macro void free(Allocator* allocator, void* ptr)
{
$if env::TESTING:
if (ptr) ((char*)ptr)[0] = 0xBA;
$endif
allocator.release(ptr, false);
}
macro void*! malloc_aligned(Allocator* allocator, usz size, usz alignment, usz offset = 0) @nodiscard
{
$if env::TESTING:
char* data = allocator.acquire(size, false, alignment, offset)!;
mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
return data;
$else
return allocator.acquire(size, false, alignment, offset);
$endif
}
macro void*! calloc_aligned(Allocator* allocator, usz size, usz alignment, usz offset = 0) @nodiscard
{
return allocator.acquire(size, true, alignment, offset);
}
macro void*! realloc_aligned(Allocator* allocator, void* ptr, usz new_size, usz alignment, usz offset = 0) @nodiscard
{
return allocator.resize(ptr, new_size, alignment, offset);
}
macro void free_aligned(Allocator* allocator, void* ptr)
{
$if env::TESTING:
if (ptr) ((char*)ptr)[0] = 0xBA;
$endif
allocator.release(ptr, true);
}
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 1, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
**/
macro new(Allocator* allocator, $Type, ...) @nodiscard
{
$if $vacount == 0:
return ($Type*)calloc(allocator, $Type.sizeof);
$else
$Type* val = malloc(allocator, $Type.sizeof);
*val = $vaexpr(0);
return val;
$endif
}
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
**/
macro new_try(Allocator* allocator, $Type, ...) @nodiscard
{
$if $vacount == 0:
return ($Type*)calloc_try(allocator, $Type.sizeof);
$else
$Type* val = malloc_try(allocator, $Type.sizeof)!;
*val = $vaexpr(0);
return val;
$endif
}
macro new_with_padding(Allocator* allocator, $Type, usz padding) @nodiscard
{
return ($Type*)calloc_try(allocator, $Type.sizeof + padding);
}
macro alloc(Allocator* allocator, $Type) @nodiscard
{
return ($Type*)malloc(allocator, $Type.sizeof);
}
macro alloc_try(Allocator* allocator, $Type) @nodiscard
{
return ($Type*)malloc_try(allocator, $Type.sizeof);
}
macro alloc_with_padding(Allocator* allocator, $Type, usz padding) @nodiscard
{
return ($Type*)malloc_try(allocator, $Type.sizeof + padding);
}
macro new_array(Allocator* allocator, $Type, usz elements) @nodiscard
{
return new_array_try(allocator, $Type, elements)!!;
}
macro new_array_try(Allocator* allocator, $Type, usz elements) @nodiscard
{
return (($Type*)calloc_try(allocator, $Type.sizeof * elements))[:elements];
}
macro alloc_array(Allocator* allocator, $Type, usz elements) @nodiscard
{
return alloc_array_try(allocator, $Type, elements)!!;
}
macro alloc_array_try(Allocator* allocator, $Type, usz elements) @nodiscard
{
return (($Type*)malloc_try(allocator, $Type.sizeof * elements))[:elements];
}
// Clone `value` into allocator-owned memory, inferring the type from the
// argument; delegates to `new`, so it panics on allocation failure.
macro clone(Allocator* allocator, value) @nodiscard
{
	return new(allocator, $typeof(value), value);
}
/**
 * Clone the payload of an `any` into freshly allocated memory and wrap it
 * in a new `any` carrying the same runtime typeid.
 **/
fn any* clone_any(Allocator* allocator, any* value) @nodiscard
{
	usz size = value.type.sizeof;
	void* data = malloc(allocator, size);
	mem::copy(data, value.ptr, size);
	return any_make(data, value.type);
}
// Allocator "functions"
// Deprecated method-style wrappers kept for source compatibility; new code
// should use the free functions in the allocator module instead.
macro void*! Allocator.alloc_checked(&self, usz size) @deprecated("Use allocator::malloc_try")
{
	$if env::TESTING:
		// In test builds, poison fresh memory with 0xAA so reads of
		// uninitialized data are easier to spot.
		char* data = self.acquire(size, false, 0, 0)!;
		mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
		return data;
	$else
		return self.acquire(size, false, 0, 0);
	$endif
}
// Zero-initialized variant (acquire with zeroing = true).
macro void*! Allocator.calloc_checked(&self, usz size) @deprecated("Use allocator::calloc_try")
{
	return self.acquire(size, true, 0, 0);
}
// Resize a previous allocation; returns an optional on failure.
macro void*! Allocator.realloc_checked(&self, void* ptr, usz new_size) @deprecated("Use allocator::realloc_try")
{
	return self.resize(ptr, new_size, 0, 0);
}
// Deprecated array/value allocators. `*_checked` variants propagate the
// optional; the plain variants force-unwrap (!!) and so panic on failure.
// `end_padding` adds trailing bytes beyond the element storage.
macro Allocator.new_array(&self, $Type, usz size, usz end_padding = 0) @deprecated("Use allocator::alloc_array")
{
	return (($Type*)self.alloc_checked($Type.sizeof * size + end_padding))[:size]!!;
}
macro Allocator.new_array_checked(&self, $Type, usz size, usz end_padding = 0) @deprecated("Use allocator::alloc_array_try")
{
	return (($Type*)self.alloc_checked($Type.sizeof * size + end_padding))[:size];
}
// Zeroed variants of the array allocators.
macro Allocator.new_zero_array(&self, $Type, usz size, usz end_padding = 0) @deprecated("Use allocator::new_array")
{
	return (($Type*)self.calloc_checked($Type.sizeof * size + end_padding))[:size]!!;
}
macro Allocator.new_zero_array_checked(&self, $Type, usz size, usz end_padding = 0) @deprecated("Use allocator::new_array_try")
{
	return (($Type*)self.calloc_checked($Type.sizeof * size + end_padding))[:size];
}
// Single-value allocators: `new*` uninitialized, `new_clear*` zeroed.
macro Allocator.new(&self, $Type, usz end_padding = 0) @nodiscard @deprecated("Use allocator::alloc")
{
	return ($Type*)self.alloc_checked($Type.sizeof + end_padding)!!;
}
macro Allocator.new_checked(&self, $Type, usz end_padding = 0) @nodiscard @deprecated("Use allocator::alloc_try")
{
	return ($Type*)self.alloc_checked($Type.sizeof + end_padding);
}
macro Allocator.new_clear(&self, $Type, usz end_padding = 0) @nodiscard @deprecated("Use allocator::new")
{
	return ($Type*)self.calloc_checked($Type.sizeof + end_padding)!!;
}
macro Allocator.new_clear_checked(&self, $Type, usz end_padding = 0) @nodiscard @deprecated("Use allocator::new_try")
{
	return ($Type*)self.calloc_checked($Type.sizeof + end_padding);
}
// Clone `value` into memory owned by this allocator (panics on failure).
macro Allocator.clone(&self, value) @deprecated("Use allocator::clone")
{
	var x = self.alloc($typeof(value));
	*x = value;
	return x;
}
// Panicking (!!) raw-byte allocators.
macro void* Allocator.alloc(&self, usz size) @nodiscard @deprecated("Use allocator::malloc")
{
	return self.alloc_checked(size)!!;
}
macro void* Allocator.calloc(&self, usz size) @nodiscard @deprecated("Use allocator::calloc")
{
	return self.acquire(size, true, 0, 0)!!;
}
macro void* Allocator.realloc(&self, void* ptr, usz new_size) @nodiscard @deprecated("Use allocator::realloc")
{
	return self.resize(ptr, new_size, 0, 0)!!;
}
// Aligned variants: alignment/offset are forwarded to acquire/resize.
macro void*! Allocator.alloc_aligned(&self, usz size, usz alignment, usz offset = 0) @deprecated("Use allocator::alloc_aligned")
{
	$if env::TESTING:
		// Poison fresh memory with 0xAA in test builds.
		char* data = self.acquire(size, false, alignment, offset)!;
		mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
		return data;
	$else
		return self.acquire(size, false, alignment, offset);
	$endif
}
macro void*! Allocator.calloc_aligned(&self, usz size, usz alignment, usz offset = 0) @deprecated("Use allocator::calloc_aligned")
{
	return self.acquire(size, true, alignment, offset);
}
macro void*! Allocator.realloc_aligned(&self, void* ptr, usz new_size, usz alignment = 0, usz offset = 0) @deprecated("Use allocator::realloc_aligned")
{
	return self.resize(ptr, new_size, alignment, offset);
}
// Release an allocation; in test builds, scribble 0xBA over the first byte
// to help catch use-after-free.
macro void Allocator.free(&self, void* ptr) @deprecated("Use allocator::free")
{
	$if env::TESTING:
		if (ptr) ((char*)ptr)[0] = 0xBA;
	$endif
	self.release(ptr, false);
}
// As free, but for aligned allocations (release with aligned = true).
macro void Allocator.free_aligned(&self, void* ptr) @deprecated("Use allocator::free_aligned")
{
	$if env::TESTING:
		if (ptr) ((char*)ptr)[0] = 0xBA;
	$endif
	self.release(ptr, true);
}
/**
* @require bytes > 0
* @require alignment > 0
@@ -64,6 +344,22 @@ macro void*! @aligned_calloc(#calloc_fn, usz bytes, usz alignment, usz offset)
return mem;
}
// Bookkeeping header stored immediately before an aligned allocation:
// `start` is the original (unaligned) pointer to hand back to the
// underlying allocator.
// NOTE(review): `len` appears to be the requested byte length — confirm
// against the @aligned_alloc/@aligned_realloc writers (not in this view).
struct AlignedBlock
{
	usz len;
	void* start;
}
/**
 * Free an allocation created by the @aligned_* helpers: recover the
 * AlignedBlock header placed just before `old_pointer` and release the
 * original unaligned pointer through #free_fn.
 **/
macro void! @aligned_free(#free_fn, void* old_pointer)
{
	AlignedBlock* desc = (AlignedBlock*)old_pointer - 1;
	// #free_fn may or may not return an optional; branch at compile time
	// so the rethrow (!) is only emitted when it exists.
	$if @typekind(#free_fn(desc.start)) == OPTIONAL:
		#free_fn(desc.start)!;
	$else
		#free_fn(desc.start);
	$endif
}
/**
* @require bytes > 0
* @require alignment > 0
@@ -82,143 +378,52 @@ macro void*! @aligned_realloc(#calloc_fn, #free_fn, void* old_pointer, usz bytes
return new_data;
}
macro void! @aligned_free(#free_fn, void* old_pointer)
// All allocators
tlocal Allocator* thread_allocator @private = &allocator::LIBC_ALLOCATOR;
tlocal TempAllocator* thread_temp_allocator @private = null;
tlocal TempAllocator*[2] temp_allocator_pair @private;
Allocator* temp_base_allocator @private = &allocator::LIBC_ALLOCATOR;
macro TempAllocator* create_default_sized_temp_allocator(Allocator* allocator) @local
{
AlignedBlock* desc = (AlignedBlock*)old_pointer - 1;
$if @typekind(#free_fn(desc.start)) == OPTIONAL:
#free_fn(desc.start)!;
$else
#free_fn(desc.start);
$endif
$switch (env::MEMORY_ENV)
$case NORMAL:
return new_temp_allocator(1024 * 256, allocator)!!;
$case SMALL:
return new_temp_allocator(1024 * 16, allocator)!!;
$case TINY:
return new_temp_allocator(1024 * 2, allocator)!!;
$case NONE:
unreachable("Temp allocator must explicitly created when memory-env is set to 'none'.");
$endswitch
}
def MemoryAllocFn = fn char[]!(usz);
macro Allocator* heap() => thread_allocator;
fault AllocationFailure
macro TempAllocator* temp()
{
OUT_OF_MEMORY,
CHUNK_TOO_LARGE,
if (!thread_temp_allocator)
{
init_default_temp_allocators();
}
return thread_temp_allocator;
}
fn usz alignment_for_allocation(usz alignment) @inline @private
fn void init_default_temp_allocators() @private
{
return alignment < mem::DEFAULT_MEM_ALIGNMENT ? alignment = mem::DEFAULT_MEM_ALIGNMENT : alignment;
temp_allocator_pair[0] = create_default_sized_temp_allocator(temp_base_allocator);
temp_allocator_pair[1] = create_default_sized_temp_allocator(temp_base_allocator);
thread_temp_allocator = temp_allocator_pair[0];
}
// Allocator "functions"
macro void*! Allocator.alloc_checked(&self, usz size)
fn TempAllocator *temp_allocator_next() @private
{
$if env::TESTING:
char* data = self.acquire(size, false, 0, 0)!;
mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
return data;
$else
return self.acquire(size, false, 0, 0);
$endif
}
macro void*! Allocator.calloc_checked(&self, usz size)
{
return self.acquire(size, true, 0, 0);
}
macro void*! Allocator.realloc_checked(&self, void* ptr, usz new_size)
{
return self.resize(ptr, new_size, 0, 0);
}
macro Allocator.new_array(&self, $Type, usz size, usz end_padding = 0)
{
return (($Type*)self.alloc_checked($Type.sizeof * size + end_padding))[:size]!!;
}
macro Allocator.new_array_checked(&self, $Type, usz size, usz end_padding = 0)
{
return (($Type*)self.alloc_checked($Type.sizeof * size + end_padding))[:size];
}
macro Allocator.new_zero_array(&self, $Type, usz size, usz end_padding = 0)
{
return (($Type*)self.calloc_checked($Type.sizeof * size + end_padding))[:size]!!;
}
macro Allocator.new_zero_array_checked(&self, $Type, usz size, usz end_padding = 0)
{
return (($Type*)self.calloc_checked($Type.sizeof * size + end_padding))[:size];
}
macro Allocator.new(&self, $Type, usz end_padding = 0) @nodiscard
{
return ($Type*)self.alloc_checked($Type.sizeof + end_padding)!!;
}
macro Allocator.new_checked(&self, $Type, usz end_padding = 0) @nodiscard
{
return ($Type*)self.alloc_checked($Type.sizeof + end_padding);
}
macro Allocator.new_clear(&self, $Type, usz end_padding = 0) @nodiscard
{
return ($Type*)self.calloc_checked($Type.sizeof + end_padding)!!;
}
macro Allocator.new_clear_checked(&self, $Type, usz end_padding = 0) @nodiscard
{
return ($Type*)self.calloc_checked($Type.sizeof + end_padding);
}
macro Allocator.clone(&self, value)
{
var x = self.alloc($typeof(value));
*x = value;
return x;
}
macro void* Allocator.alloc(&self, usz size) @nodiscard
{
return self.alloc_checked(size)!!;
}
macro void* Allocator.calloc(&self, usz size) @nodiscard
{
return self.acquire(size, true, 0, 0)!!;
}
macro void* Allocator.realloc(&self, void* ptr, usz new_size) @nodiscard
{
return self.resize(ptr, new_size, 0, 0)!!;
}
macro void*! Allocator.alloc_aligned(&self, usz size, usz alignment, usz offset = 0)
{
$if env::TESTING:
char* data = self.acquire(size, false, alignment, offset)!;
mem::set(data, 0xAA, size, mem::DEFAULT_MEM_ALIGNMENT);
return data;
$else
return self.acquire(size, false, alignment, offset);
$endif
}
macro void*! Allocator.calloc_aligned(&self, usz size, usz alignment, usz offset = 0)
{
return self.acquire(size, true, alignment, offset);
}
macro void*! Allocator.realloc_aligned(&self, void* ptr, usz new_size, usz alignment = 0, usz offset = 0)
{
return self.resize(ptr, new_size, alignment, offset);
}
macro void Allocator.free(&self, void* ptr)
{
$if env::TESTING:
if (ptr) ((char*)ptr)[0] = 0xBA;
$endif
self.release(ptr, false);
}
macro void Allocator.free_aligned(&self, void* ptr)
{
$if env::TESTING:
if (ptr) ((char*)ptr)[0] = 0xBA;
$endif
self.release(ptr, true);
}
if (!thread_temp_allocator)
{
init_default_temp_allocators();
return thread_temp_allocator;
}
usz index = thread_temp_allocator == temp_allocator_pair[0] ? 1 : 0;
return thread_temp_allocator = temp_allocator_pair[index];
}

View File

@@ -0,0 +1,258 @@
module std::core::cpudetect @if(env::X86 || env::X86_64);
// Result of one CPUID invocation: the four output registers.
struct CpuId
{
	uint eax, ebx, ecx, edx;
}
/**
 * Execute CPUID for leaf `eax` (and sub-leaf `ecx`, default 0) and
 * return all four result registers.
 **/
fn CpuId x86_cpuid(uint eax, uint ecx = 0)
{
	int edx;
	int ebx;
	asm
	{
		// Load the requested leaf/sub-leaf, run CPUID, copy results out.
		movl $eax, eax;
		movl $ecx, ecx;
		cpuid;
		movl eax, $eax;
		movl ebx, $ebx;
		movl ecx, $ecx;
		movl edx, $edx;
	}
	return { eax, ebx, ecx, edx };
}
// x86/x64 CPU feature flags, listed alphabetically. The enum ordinal is
// the bit index into `x86_features` (see add_feature_if_bit), so
// reordering entries changes the meaning of stored bitsets.
enum X86Feature
{
	ADX,
	AES,
	AMX_BF16,
	AMX_COMPLEX,
	AMX_FP16,
	AMX_INT8,
	AMX_TILE,
	AVX,
	AVX10_1_256,
	AVX10_1_512,
	AVX2,
	AVX5124FMAPS,
	AVX5124VNNIW,
	AVX512BF16,
	AVX512BITALG,
	AVX512BW,
	AVX512CD,
	AVX512DQ,
	AVX512ER,
	AVX512F,
	AVX512FP16,
	AVX512IFMA,
	AVX512PF,
	AVX512VBMI,
	AVX512VBMI2,
	AVX512VL,
	AVX512VNNI,
	AVX512VP2INTERSECT,
	AVX512VPOPCNTDQ,
	AVXIFMA,
	AVXNECONVERT,
	AVXVNNI,
	AVXVNNIINT16,
	AVXVNNIINT8,
	BMI,
	BMI2,
	CLDEMOTE,
	CLFLUSHOPT,
	CLWB,
	CLZERO,
	CMOV,
	CMPCCXADD,
	CMPXCHG16B,
	CX8,
	ENQCMD,
	F16C,
	FMA,
	FMA4,
	FSGSBASE,
	FXSR,
	GFNI,
	HRESET,
	INVPCID,
	KL,
	LWP,
	LZCNT,
	MMX,
	MOVBE,
	MOVDIR64B,
	MOVDIRI,
	MWAITX,
	PCLMUL,
	PCONFIG,
	PKU,
	POPCNT,
	PREFETCHI,
	PREFETCHWT1,
	PRFCHW,
	PTWRITE,
	RAOINT,
	RDPID,
	RDPRU,
	RDRND,
	RDSEED,
	RTM,
	SAHF,
	SERIALIZE,
	SGX,
	SHA,
	SHA512,
	SHSTK,
	SM3,
	SM4,
	SSE,
	SSE2,
	SSE3,
	SSE4_1,
	SSE4_2,
	SSE4_A,
	SSSE3,
	TBM,
	TSXLDTRK,
	UINTR,
	USERMSR,
	VAES,
	VPCLMULQDQ,
	WAITPKG,
	WBNOINVD,
	WIDEKL,
	X87,
	XOP,
	XSAVE,
	XSAVEC,
	XSAVEOPT,
	XSAVES,
}
// Bitset of detected features, one bit per X86Feature ordinal.
uint128 x86_features;
// Set `feature`'s bit in x86_features when bit `bit` of `register` is set.
fn void add_feature_if_bit(X86Feature feature, uint register, int bit)
{
	if (register & 1U << bit) x86_features |= 1u128 << feature.ordinal;
}
/**
 * Populate `x86_features` by probing the relevant CPUID leaves once.
 * Bit positions follow the Intel SDM / AMD APM CPUID tables.
 *
 * Fixes: (1) leaf 7 was gated on `max_level >= 8`, which wrongly skipped
 * it on CPUs whose max basic leaf is exactly 7 — leaf 7 is valid whenever
 * max_level >= 7. (2) CMPXCHG16B is CPUID.01H:ECX bit 13, not 12
 * (bit 12 is FMA; the old code mapped both features to bit 12).
 **/
fn void x86_initialize_cpu_features()
{
	uint max_level = x86_cpuid(0).eax;
	// Basic leaves: 1 (feature flags), 7 + sub-leaf 1 (extended flags).
	CpuId feat = x86_cpuid(1);
	CpuId leaf7 = max_level >= 7 ? x86_cpuid(7) : CpuId {};
	CpuId leaf7s1 = leaf7.eax >= 1 ? x86_cpuid(7, 1) : CpuId {};
	// Extended leaves 0x80000001/0x80000008, gated on the extended max.
	CpuId ext1 = x86_cpuid(0x80000000).eax >= 0x80000001 ? x86_cpuid(0x80000001) : CpuId {};
	CpuId ext8 = x86_cpuid(0x80000000).eax >= 0x80000008 ? x86_cpuid(0x80000008) : CpuId {};
	// Special-purpose leaves: XSAVE (0xd.1), PT (0x14), keylocker (0x19), AVX10 (0x24).
	CpuId leaf_d = max_level >= 0xd ? x86_cpuid(0xd, 0x1) : CpuId {};
	CpuId leaf_14 = max_level >= 0x14 ? x86_cpuid(0x14) : CpuId {};
	CpuId leaf_19 = max_level >= 0x19 ? x86_cpuid(0x19) : CpuId {};
	CpuId leaf_24 = max_level >= 0x24 ? x86_cpuid(0x24) : CpuId {};
	add_feature_if_bit(ADX, leaf7.ebx, 19);
	add_feature_if_bit(AES, feat.ecx, 25);
	add_feature_if_bit(AMX_BF16, leaf7.edx, 22);
	add_feature_if_bit(AMX_COMPLEX, leaf7s1.edx, 8);
	add_feature_if_bit(AMX_FP16, leaf7s1.eax, 21);
	add_feature_if_bit(AMX_INT8, leaf7.edx, 25);
	add_feature_if_bit(AMX_TILE, leaf7.edx, 24);
	add_feature_if_bit(AVX, feat.ecx, 28);
	add_feature_if_bit(AVX10_1_256, leaf7s1.edx, 19);
	add_feature_if_bit(AVX10_1_512, leaf_24.ebx, 18);
	add_feature_if_bit(AVX2, leaf7.ebx, 5);
	add_feature_if_bit(AVX5124FMAPS, leaf7.edx, 3);
	add_feature_if_bit(AVX5124VNNIW, leaf7.edx, 2);
	add_feature_if_bit(AVX512BF16, leaf7s1.eax, 5);
	add_feature_if_bit(AVX512BITALG, leaf7.ecx, 12);
	add_feature_if_bit(AVX512BW, leaf7.ebx, 30);
	add_feature_if_bit(AVX512CD, leaf7.ebx, 28);
	add_feature_if_bit(AVX512DQ, leaf7.ebx, 17);
	add_feature_if_bit(AVX512ER, leaf7.ebx, 27);
	add_feature_if_bit(AVX512F, leaf7.ebx, 16);
	add_feature_if_bit(AVX512FP16, leaf7.edx, 23);
	add_feature_if_bit(AVX512IFMA, leaf7.ebx, 21);
	add_feature_if_bit(AVX512PF, leaf7.ebx, 26);
	add_feature_if_bit(AVX512VBMI, leaf7.ecx, 1);
	add_feature_if_bit(AVX512VBMI2, leaf7.ecx, 6);
	add_feature_if_bit(AVX512VL, leaf7.ebx, 31);
	add_feature_if_bit(AVX512VNNI, leaf7.ecx, 11);
	add_feature_if_bit(AVX512VP2INTERSECT, leaf7.edx, 8);
	add_feature_if_bit(AVX512VPOPCNTDQ, leaf7.ecx, 14);
	add_feature_if_bit(AVXIFMA, leaf7s1.eax, 23);
	add_feature_if_bit(AVXNECONVERT, leaf7s1.edx, 5);
	add_feature_if_bit(AVXVNNI, leaf7s1.eax, 4);
	add_feature_if_bit(AVXVNNIINT16, leaf7s1.edx, 10);
	add_feature_if_bit(AVXVNNIINT8, leaf7s1.edx, 4);
	add_feature_if_bit(BMI, leaf7.ebx, 3);
	add_feature_if_bit(BMI2, leaf7.ebx, 8);
	add_feature_if_bit(CLDEMOTE, leaf7.ecx, 25);
	add_feature_if_bit(CLFLUSHOPT, leaf7.ebx, 23);
	add_feature_if_bit(CLWB, leaf7.ebx, 24);
	add_feature_if_bit(CLZERO, ext8.ecx, 0);
	add_feature_if_bit(CMOV, feat.edx, 15);
	add_feature_if_bit(CMPCCXADD, leaf7s1.eax, 7);
	// CPUID.01H:ECX bit 13 is CMPXCHG16B (bit 12 is FMA).
	add_feature_if_bit(CMPXCHG16B, feat.ecx, 13);
	add_feature_if_bit(CX8, feat.edx, 8);
	add_feature_if_bit(ENQCMD, leaf7.ecx, 29);
	add_feature_if_bit(F16C, feat.ecx, 29);
	add_feature_if_bit(FMA, feat.ecx, 12);
	add_feature_if_bit(FMA4, ext1.ecx, 16);
	add_feature_if_bit(FSGSBASE, leaf7.ebx, 0);
	add_feature_if_bit(FXSR, feat.edx, 24);
	add_feature_if_bit(GFNI, leaf7.ecx, 8);
	add_feature_if_bit(HRESET, leaf7s1.eax, 22);
	add_feature_if_bit(INVPCID, leaf7.ebx, 10);
	add_feature_if_bit(KL, leaf7.ecx, 23);
	add_feature_if_bit(LWP, ext1.ecx, 15);
	add_feature_if_bit(LZCNT, ext1.ecx, 5);
	add_feature_if_bit(MMX, feat.edx, 23);
	add_feature_if_bit(MOVBE, feat.ecx, 22);
	add_feature_if_bit(MOVDIR64B, leaf7.ecx, 28);
	add_feature_if_bit(MOVDIRI, leaf7.ecx, 27);
	add_feature_if_bit(MWAITX, ext1.ecx, 29);
	add_feature_if_bit(PCLMUL, feat.ecx, 1);
	add_feature_if_bit(PCONFIG, leaf7.edx, 18);
	add_feature_if_bit(PKU, leaf7.ecx, 4);
	add_feature_if_bit(POPCNT, feat.ecx, 23);
	add_feature_if_bit(PREFETCHI, leaf7s1.edx, 14);
	add_feature_if_bit(PREFETCHWT1, leaf7.ecx, 0);
	add_feature_if_bit(PRFCHW, ext1.ecx, 8);
	add_feature_if_bit(PTWRITE, leaf_14.ebx, 4);
	add_feature_if_bit(RAOINT, leaf7s1.eax, 3);
	add_feature_if_bit(RDPID, leaf7.ecx, 22);
	add_feature_if_bit(RDPRU, ext8.ecx, 4);
	add_feature_if_bit(RDRND, feat.ecx, 30);
	add_feature_if_bit(RDSEED, leaf7.ebx, 18);
	add_feature_if_bit(RTM, leaf7.ebx, 11);
	add_feature_if_bit(SAHF, ext1.ecx, 0);
	add_feature_if_bit(SERIALIZE, leaf7.edx, 14);
	add_feature_if_bit(SGX, leaf7.ebx, 2);
	add_feature_if_bit(SHA, leaf7.ebx, 29);
	add_feature_if_bit(SHA512, leaf7s1.eax, 0);
	add_feature_if_bit(SHSTK, leaf7.ecx, 7);
	add_feature_if_bit(SM3, leaf7s1.eax, 1);
	add_feature_if_bit(SM4, leaf7s1.eax, 2);
	add_feature_if_bit(SSE, feat.edx, 25);
	add_feature_if_bit(SSE2, feat.edx, 26);
	add_feature_if_bit(SSE3, feat.ecx, 0);
	add_feature_if_bit(SSE4_1, feat.ecx, 19);
	add_feature_if_bit(SSE4_2, feat.ecx, 20);
	add_feature_if_bit(SSE4_A, ext1.ecx, 6);
	add_feature_if_bit(SSSE3, feat.ecx, 9);
	add_feature_if_bit(TBM, ext1.ecx, 21);
	add_feature_if_bit(TSXLDTRK, leaf7.edx, 16);
	add_feature_if_bit(UINTR, leaf7.edx, 5);
	add_feature_if_bit(USERMSR, leaf7s1.edx, 15);
	add_feature_if_bit(VAES, leaf7.ecx, 9);
	add_feature_if_bit(VPCLMULQDQ, leaf7.ecx, 10);
	add_feature_if_bit(WAITPKG, leaf7.ecx, 5);
	add_feature_if_bit(WBNOINVD, ext8.ecx, 9);
	add_feature_if_bit(WIDEKL, leaf_19.ebx, 2);
	add_feature_if_bit(X87, feat.edx, 0);
	add_feature_if_bit(XOP, ext1.ecx, 11);
	add_feature_if_bit(XSAVE, feat.ecx, 26);
	add_feature_if_bit(XSAVEC, leaf_d.eax, 1);
	add_feature_if_bit(XSAVEOPT, leaf_d.eax, 0);
	add_feature_if_bit(XSAVES, leaf_d.eax, 3);
}

View File

@@ -0,0 +1,254 @@
module std::core::machoruntime @if(env::DARWIN) @private;
// Mach-O LC_SEGMENT_64 load command layout (matches <mach-o/loader.h>).
struct SegmentCommand64
{
	uint cmd;
	uint cmdsize;
	char[16] segname;
	ulong vmaddr;
	ulong vmsize;
	ulong fileoff;
	ulong filesize;
	uint maxprot;
	uint initprot;
	uint nsects;
	uint flags;
}
// Common header shared by every Mach-O load command; `cmdsize` is the
// byte stride to the next command.
struct LoadCommand
{
	uint cmd;
	uint cmdsize;
}
// 64-bit Mach-O section header; sections follow their SegmentCommand64
// contiguously in memory.
struct Section64
{
	char[16] sectname;
	char[16] segname;
	ulong addr;
	ulong size;
	uint offset;
	uint align;
	uint reloff;
	uint nreloc;
	uint flags;
	uint reserved1;
	uint reserved2;
	uint reserved3;
}
// Mach-O file header (32-bit layout; shared prefix of the 64-bit header).
struct MachHeader
{
	uint magic;
	uint cputype;
	uint cpusubtype;
	uint filetype;
	uint ncmds;
	uint sizeofcmds;
	uint flags;
}
// 64-bit Mach-O header: the 32-bit header plus a reserved word.
struct MachHeader64
{
	inline MachHeader header;
	uint reserved;
}
// Load-command tag for a 64-bit segment (LC_SEGMENT_64 in loader.h).
const LC_SEGMENT_64 = 0x19;
// Raised by find_segment/find_section when no matching entry exists.
fault MachoSearch
{
	NOT_FOUND
}
/**
 * Compare a zero-terminated name `a` against a fixed 16-byte Mach-O name
 * field `b`. Returns true only when the bytes match up to and including
 * `a`'s terminator within the first 16 bytes; a 16-byte match with no
 * terminator in `a` compares false (as the original did).
 **/
fn bool name_cmp(char* a, char[16]* b)
{
	usz i = 0;
	while (i < 16)
	{
		char c = a[i];
		if (c != (*b)[i]) return false;
		if (c == '\0') return true;
		i++;
	}
	// All 16 bytes equal but `a` did not terminate — treat as no match.
	return false;
}
/**
 * Walk the load commands after the Mach-O header and return the first
 * LC_SEGMENT_64 whose segname matches, or MachoSearch.NOT_FOUND.
 **/
fn SegmentCommand64*! find_segment(MachHeader* header, char* segname)
{
	// Load commands start right after the (64-bit) header.
	LoadCommand* command = (void*)header + MachHeader64.sizeof;
	for (uint i = 0; i < header.ncmds; i++)
	{
		if (command.cmd == LC_SEGMENT_64)
		{
			SegmentCommand64* segment = (SegmentCommand64*)command;
			if (name_cmp(segname, &segment.segname)) return segment;
		}
		// Commands are variable-length; advance by this command's size.
		command = (void*)command + command.cmdsize;
	}
	return MachoSearch.NOT_FOUND?;
}
/**
 * Scan the Section64 headers that follow a segment command for one whose
 * sectname matches, or return MachoSearch.NOT_FOUND.
 **/
fn Section64*! find_section(SegmentCommand64* command, char* sectname)
{
	// Section headers are laid out contiguously after the command.
	Section64* section = (void*)command + SegmentCommand64.sizeof;
	for (uint i = 0; i < command.nsects; i++)
	{
		if (name_cmp(sectname, &section.sectname)) return section;
		section++;
	}
	return MachoSearch.NOT_FOUND?;
}
/**
 * Locate segment/section by name in the loaded image and view its
 * contents as a $Type slice; yields an empty slice when either lookup
 * fails.
 **/
macro find_segment_section_body(MachHeader* header, char* segname, char* sectname, $Type)
{
	Section64*! section = find_section(find_segment(header, segname), sectname);
	if (catch section)
	{
		return $Type[] {};
	}
	// NOTE(review): offset is interpreted relative to the in-memory
	// header — assumes the image is mapped as loaded; confirm.
	$Type* ptr = (void*)header + section.offset;
	return ptr[:section.size / $Type.sizeof];
}
// Callback signature used by dyld's image-registration hook.
def DyldCallback = fn void (MachHeader* mh, isz vmaddr_slide);
extern fn void _dyld_register_func_for_add_image(DyldCallback);
// Mirror of dladdr()'s Dl_info result struct.
struct DlInfo
{
	char* dli_fname;
	void* dli_fbase;
	char* dli_sname;
	void* dli_saddr;
}
// Raw libc bindings used by the runtime before the stdlib is up.
extern fn void printf(char*, ...);
extern fn int dladdr(MachHeader* mh, DlInfo* dlinfo);
extern fn void* realloc(void* ptr, usz size);
extern fn void* malloc(usz size);
extern fn void free(void* ptr);
def CallbackFn = fn void();
// One constructor/destructor registration: a priority-ordered linked list
// node (lower priority runs first — see append_xxlizer).
struct Callback
{
	uint priority;
	CallbackFn xtor;
	Callback* next;
}
// Dynamic-dispatch method entry. The union holds the target typeid before
// registration and the next method in the type's dtable afterwards.
struct DynamicMethod
{
	void* fn_ptr;
	char* sel;
	union
	{
		DynamicMethod* next;
		TypeId* type;
	}
}
// Runtime bootstrap phases, advanced in order by runtime_startup /
// dl_reg_callback / runtime_finalize; asserts check legal transitions.
enum StartupState
{
	NOT_STARTED,
	INIT,
	RUN_CTORS,
	READ_DYLIB,
	RUN_DYLIB_CTORS,
	RUN_DTORS,
	SHUTDOWN
}
// Current phase, plus the pending constructor and destructor lists.
StartupState runtime_state = NOT_STARTED;
Callback* ctor_first;
Callback* dtor_first;
/**
 * Entry point for C3 runtime startup on Darwin: register the dyld image
 * callback (which collects ctors/dtors and dynamic methods from every
 * loaded image), then run the gathered constructors in priority order.
 * Idempotent — a second call is a no-op.
 **/
fn void runtime_startup() @public @export("__c3_runtime_startup")
{
	if (runtime_state != NOT_STARTED) return;
	runtime_state = INIT;
	// dyld invokes dl_reg_callback synchronously for every already-loaded
	// image, and later for each dylib loaded afterwards.
	_dyld_register_func_for_add_image(&dl_reg_callback);
	assert(runtime_state == INIT);
	runtime_state = RUN_CTORS;
	Callback* ctor = ctor_first;
	while (ctor)
	{
		ctor.xtor();
		ctor = ctor.next;
	}
	assert(runtime_state == RUN_CTORS);
	runtime_state = READ_DYLIB;
	// NOTE(review): dead store — `ctor` goes out of scope immediately.
	ctor = null;
}
/**
 * Runtime shutdown: run all registered destructors in priority order and
 * move to SHUTDOWN. Only valid from the steady READ_DYLIB state; any
 * other state makes this a no-op.
 **/
fn void runtime_finalize() @public @export("__c3_runtime_finalize")
{
	if (runtime_state != READ_DYLIB) return;
	runtime_state = RUN_DTORS;
	Callback* dtor = dtor_first;
	while (dtor)
	{
		dtor.xtor();
		dtor = dtor.next;
	}
	assert(runtime_state == RUN_DTORS);
	runtime_state = SHUTDOWN;
}
/**
 * Insert callback `cb` into the singly-linked list at `*ref`, keeping the
 * list sorted by ascending priority (stable: equal priorities insert
 * after existing entries).
 **/
fn void append_xxlizer(Callback** ref, Callback* cb)
{
	while (Callback* current = *ref, current)
	{
		if (current.priority > cb.priority)
		{
			// Splice in before the first higher-priority node.
			cb.next = current;
			break;
		}
		ref = &current.next;
	}
	// NOTE(review): when appending at the tail, cb.next is left as-is —
	// assumes section-emitted Callback entries start with next == null.
	*ref = cb;
}
// Runtime type descriptor; must mirror the compiler-emitted typeid
// layout. `dtable` heads the linked list of registered dynamic methods.
struct TypeId
{
	char type;
	TypeId* parentof;
	DynamicMethod* dtable;
	usz sizeof;
	TypeId* inner;
	usz len;
	typeid[*] additional;
}
/**
 * dyld add-image callback: for each loaded Mach-O image, link its
 * __c3_dynamic methods into their types' dtables and register its
 * __c3ctor/__c3dtor callbacks. For a dylib loaded after startup
 * (state READ_DYLIB), its constructors run immediately.
 *
 * Cleanup: removed an unused local (`usz size = 0;`) and a leftover
 * debug loop that walked each method chain without any effect.
 **/
fn void dl_reg_callback(MachHeader* mh, isz vmaddr_slide)
{
	assert(runtime_state == INIT || runtime_state == READ_DYLIB, "State was %s", runtime_state);
	// Prepend each dynamic method onto its target type's dtable.
	foreach (&dm : find_segment_section_body(mh, "__DATA", "__c3_dynamic", DynamicMethod))
	{
		TypeId* type = dm.type;
		dm.next = type.dtable;
		type.dtable = dm;
	}
	// Collect this image's destructors and constructors, priority-sorted.
	foreach (&cb : find_segment_section_body(mh, "__DATA", "__c3dtor", Callback))
	{
		append_xxlizer(&dtor_first, cb);
	}
	foreach (&cb : find_segment_section_body(mh, "__DATA", "__c3ctor", Callback))
	{
		append_xxlizer(&ctor_first, cb);
	}
	// During initial startup the main loop runs ctors; only a dylib
	// loaded later must run its own ctors here.
	if (runtime_state != READ_DYLIB) return;
	runtime_state = RUN_DYLIB_CTORS;
	Callback* ctor = ctor_first;
	// Reset the list so the same ctors are not run again by a later load.
	ctor_first = null;
	while (ctor)
	{
		ctor.xtor();
		ctor = ctor.next;
	}
	assert(runtime_state == RUN_DYLIB_CTORS);
	runtime_state = READ_DYLIB;
}

View File

@@ -21,7 +21,7 @@ macro int @main_to_void_main(#m, int, char**)
macro String[] args_to_strings(int argc, char** argv) @private
{
String[] list = mem::new_array(String, argc);
String[] list = mem::alloc_array(String, argc);
for (int i = 0; i < argc; i++)
{
char* arg = argv[i];
@@ -68,7 +68,7 @@ macro String[] win_command_line_to_strings(ushort* cmd_line) @private
macro String[] wargs_strings(int argc, Char16** argv) @private
{
String[] list = mem::new_array(String, argc);
String[] list = mem::alloc_array(String, argc);
for (int i = 0; i < argc; i++)
{
Char16* arg = argv[i];

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::core::runtime;
import libc;
import libc, std::time, std::io, std::sort;
struct AnyStruct
{
@@ -24,11 +24,11 @@ struct BenchmarkUnit
BenchmarkFn func;
}
fn BenchmarkUnit[] benchmark_collection_create(Allocator* allocator = mem::heap())
fn BenchmarkUnit[] benchmark_collection_create(Allocator* allocator = allocator::heap())
{
BenchmarkFn[] fns = $$BENCHMARK_FNS;
String[] names = $$BENCHMARK_NAMES;
BenchmarkUnit[] benchmarks = allocator.new_array(BenchmarkUnit, names.len);
BenchmarkUnit[] benchmarks = allocator::alloc_array(allocator, BenchmarkUnit, names.len);
foreach (i, benchmark : fns)
{
benchmarks[i] = { names[i], fns[i] };
@@ -130,7 +130,7 @@ fn bool default_benchmark_runner()
{
@pool()
{
return run_benchmarks(benchmark_collection_create(mem::temp()));
return run_benchmarks(benchmark_collection_create(allocator::temp()));
};
}
@@ -142,11 +142,11 @@ struct TestUnit
TestFn func;
}
fn TestUnit[] test_collection_create(Allocator* allocator = mem::heap())
fn TestUnit[] test_collection_create(Allocator* allocator = allocator::heap())
{
TestFn[] fns = $$TEST_FNS;
String[] names = $$TEST_NAMES;
TestUnit[] tests = allocator.new_array(TestUnit, names.len);
TestUnit[] tests = allocator::alloc_array(allocator, TestUnit, names.len);
foreach (i, test : fns)
{
tests[i] = { names[i], fns[i] };
@@ -236,7 +236,7 @@ fn bool default_test_runner()
{
@pool()
{
return run_tests(test_collection_create(mem::temp()));
return run_tests(test_collection_create(allocator::temp()));
};
}

View File

@@ -38,7 +38,7 @@ macro String tformat(String fmt, ...)
return str.str_view();
}
macro String new_format(String fmt, ..., Allocator* allocator = mem::heap())
macro String new_format(String fmt, ..., Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
@@ -55,11 +55,11 @@ macro bool char_in_set(char c, String set)
return false;
}
fn String join_new(String[] s, String joiner, Allocator* allocator = mem::heap())
fn String join_new(String[] s, String joiner, Allocator* allocator = allocator::heap())
{
if (!s)
{
return (String)allocator.new_zero_array(char, 2)[:0];
return (String)allocator::new_array(allocator, char, 2)[:0];
}
usz total_size = joiner.len * s.len;
@@ -153,11 +153,11 @@ fn String String.strip_end(string, String needle)
* @require needle.len > 0 "The needle must be at least 1 character long"
* @ensure return.len > 0
**/
fn String[] String.split(s, String needle, usz max = 0, Allocator* allocator = mem::heap())
fn String[] String.split(s, String needle, usz max = 0, Allocator* allocator = allocator::heap())
{
usz capacity = 16;
usz i = 0;
String* holder = allocator.new_array(String, capacity);
String* holder = allocator::alloc_array(allocator, String, capacity);
bool no_more = false;
while (!no_more)
{
@@ -176,7 +176,7 @@ fn String[] String.split(s, String needle, usz max = 0, Allocator* allocator = m
if (i == capacity)
{
capacity *= 2;
holder = allocator.realloc(holder, String.sizeof * capacity);
holder = allocator::realloc(allocator, holder, String.sizeof * capacity);
}
holder[i++] = res;
}
@@ -193,7 +193,7 @@ fn String[] String.split(s, String needle, usz max = 0, Allocator* allocator = m
**/
fn String[] String.tsplit(s, String needle, usz max = 0)
{
return s.split(needle, max, mem::temp()) @inline;
return s.split(needle, max, allocator::temp()) @inline;
}
fn bool String.contains(s, String needle)
@@ -312,19 +312,19 @@ fn usz ZString.len(str)
}
fn ZString String.zstr_copy(s, Allocator* allocator = mem::heap())
fn ZString String.zstr_copy(s, Allocator* allocator = allocator::heap())
{
usz len = s.len;
char* str = allocator.alloc(len + 1);
char* str = allocator::malloc(allocator, len + 1);
mem::copy(str, s.ptr, len);
str[len] = 0;
return (ZString)str;
}
fn String String.concat(s1, String s2, Allocator* allocator = mem::heap())
fn String String.concat(s1, String s2, Allocator* allocator = allocator::heap())
{
usz full_len = s1.len + s2.len;
char* str = allocator.alloc(full_len + 1);
char* str = allocator::malloc(allocator, full_len + 1);
usz s1_len = s1.len;
mem::copy(str, s1.ptr, s1_len);
mem::copy(str + s1_len, s2.ptr, s2.len);
@@ -332,37 +332,37 @@ fn String String.concat(s1, String s2, Allocator* allocator = mem::heap())
return (String)str[:full_len];
}
fn String String.tconcat(s1, String s2) => s1.concat(s2, mem::temp());
fn String String.tconcat(s1, String s2) => s1.concat(s2, allocator::temp());
fn ZString String.zstr_tcopy(s) => s.zstr_copy(mem::temp()) @inline;
fn ZString String.zstr_tcopy(s) => s.zstr_copy(allocator::temp()) @inline;
fn String String.copy(s, Allocator* allocator = mem::heap())
fn String String.copy(s, Allocator* allocator = allocator::heap())
{
usz len = s.len;
char* str = allocator.alloc(len + 1);
char* str = allocator::malloc(allocator, len + 1);
mem::copy(str, s.ptr, len);
str[len] = 0;
return (String)str[:len];
}
fn void String.free(&s, Allocator* allocator = mem::heap())
fn void String.free(&s, Allocator* allocator = allocator::heap())
{
if (!s.len) return;
allocator.free(s.ptr);
allocator::free(allocator, s.ptr);
*s = "";
}
fn String String.tcopy(s) => s.copy(mem::temp()) @inline;
fn String String.tcopy(s) => s.copy(allocator::temp()) @inline;
fn String ZString.copy(z, Allocator* allocator = mem::temp())
fn String ZString.copy(z, Allocator* allocator = allocator::temp())
{
return z.str_view().copy(allocator) @inline;
}
fn String ZString.tcopy(z)
{
return z.str_view().copy(mem::temp()) @inline;
return z.str_view().copy(allocator::temp()) @inline;
}
/**
@@ -371,10 +371,10 @@ fn String ZString.tcopy(z)
* @return! UnicodeResult.INVALID_UTF8 "If the string contained an invalid UTF-8 sequence"
* @return! AllocationFailure "If allocation of the string fails"
**/
fn Char16[]! String.to_new_utf16(s, Allocator* allocator = mem::heap())
fn Char16[]! String.to_new_utf16(s, Allocator* allocator = allocator::heap())
{
usz len16 = conv::utf16len_for_utf8(s);
Char16* data = allocator.new_array_checked(Char16, len16 + 1)!;
Char16* data = allocator::alloc_array_try(allocator, Char16, len16 + 1)!;
conv::utf8to16_unsafe(s, data)!;
data[len16] = 0;
return data[:len16];
@@ -388,10 +388,10 @@ fn Char16[]! String.to_new_utf16(s, Allocator* allocator = mem::heap())
**/
fn Char16[]! String.to_temp_utf16(s)
{
return s.to_new_utf16(mem::temp());
return s.to_new_utf16(allocator::temp());
}
fn WString! String.to_new_wstring(s, Allocator* allocator = mem::heap())
fn WString! String.to_new_wstring(s, Allocator* allocator = allocator::heap())
{
return (WString)s.to_new_utf16(allocator).ptr;
}
@@ -401,10 +401,10 @@ fn WString! String.to_temp_wstring(s)
return (WString)s.to_temp_utf16().ptr;
}
fn Char32[]! String.to_new_utf32(s, Allocator* allocator = mem::heap())
fn Char32[]! String.to_new_utf32(s, Allocator* allocator = allocator::heap())
{
usz codepoints = conv::utf8_codepoints(s);
Char32* data = allocator.new_array(Char32, codepoints + 1);
Char32* data = allocator::alloc_array_try(allocator, Char32, codepoints + 1)!;
conv::utf8to32_unsafe(s, data)!;
data[codepoints] = 0;
return data[:codepoints];
@@ -412,7 +412,7 @@ fn Char32[]! String.to_new_utf32(s, Allocator* allocator = mem::heap())
fn Char32[]! String.to_temp_utf32(s)
{
return s.to_new_utf32(mem::temp());
return s.to_new_utf32(allocator::temp());
}
fn void String.convert_ascii_to_lower(s)
@@ -420,16 +420,16 @@ fn void String.convert_ascii_to_lower(s)
foreach (&c : s) if (c.is_upper()) *c += 'a' - 'A';
}
fn String String.new_ascii_to_lower(s, Allocator* allocator = mem::heap())
fn String String.new_ascii_to_lower(s, Allocator* allocator = allocator::heap())
{
String copy = s.copy(allocator);
copy.convert_ascii_to_lower();
return copy;
}
fn String String.temp_ascii_to_lower(s, Allocator* allocator = mem::heap())
fn String String.temp_ascii_to_lower(s, Allocator* allocator = allocator::heap())
{
return s.new_ascii_to_lower(mem::temp());
return s.new_ascii_to_lower(allocator::temp());
}
fn void String.convert_ascii_to_upper(s)
@@ -437,7 +437,7 @@ fn void String.convert_ascii_to_upper(s)
foreach (&c : s) if (c.is_lower()) *c -= 'a' - 'A';
}
fn String String.new_ascii_to_upper(s, Allocator* allocator = mem::heap())
fn String String.new_ascii_to_upper(s, Allocator* allocator = allocator::heap())
{
String copy = s.copy(allocator);
copy.convert_ascii_to_upper();
@@ -451,30 +451,30 @@ fn StringIterator String.iterator(s)
fn String String.temp_ascii_to_upper(s)
{
return s.new_ascii_to_upper(mem::temp());
return s.new_ascii_to_upper(allocator::temp());
}
fn String! new_from_utf32(Char32[] utf32, Allocator* allocator = mem::heap())
fn String! new_from_utf32(Char32[] utf32, Allocator* allocator = allocator::heap())
{
usz len = conv::utf8len_for_utf32(utf32);
char* data = allocator.alloc_checked(len + 1)!;
defer catch allocator.free(data);
char* data = allocator::malloc_try(allocator, len + 1)!;
defer catch allocator::free(allocator, data);
conv::utf32to8_unsafe(utf32, data);
data[len] = 0;
return (String)data[:len];
}
fn String! new_from_utf16(Char16[] utf16, Allocator* allocator = mem::heap())
fn String! new_from_utf16(Char16[] utf16, Allocator* allocator = allocator::heap())
{
usz len = conv::utf8len_for_utf16(utf16);
char* data = allocator.alloc_checked(len + 1)!;
defer catch allocator.free(data);
char* data = allocator::malloc_try(allocator, len + 1)!;
defer catch allocator::free(allocator, data);
conv::utf16to8_unsafe(utf16, data)!;
data[len] = 0;
return (String)data[:len];
}
fn String! new_from_wstring(WString wstring, Allocator* allocator = mem::heap())
fn String! new_from_wstring(WString wstring, Allocator* allocator = allocator::heap())
{
usz utf16_len;
while (wstring[utf16_len] != 0) utf16_len++;
@@ -482,8 +482,8 @@ fn String! new_from_wstring(WString wstring, Allocator* allocator = mem::heap())
return new_from_utf16(utf16, allocator);
}
fn String! temp_from_wstring(WString wstring) => new_from_wstring(wstring, mem::temp()) @inline;
fn String! temp_from_utf16(Char16[] utf16) => new_from_utf16(utf16, mem::temp()) @inline;
fn String! temp_from_wstring(WString wstring) => new_from_wstring(wstring, allocator::temp()) @inline;
fn String! temp_from_utf16(Char16[] utf16) => new_from_utf16(utf16, allocator::temp()) @inline;
fn usz String.utf8_codepoints(s)
{

View File

@@ -13,12 +13,12 @@ fn void CsvReader.init(&self, InStream* stream, String separator = ",")
self.separator = separator;
}
fn String[]! CsvReader.read_new_row(self, Allocator* allocator = mem::heap())
fn String[]! CsvReader.read_new_row(self, Allocator* allocator = allocator::heap())
{
return self.read_new_row_with_allocator(mem::temp()) @inline;
return self.read_new_row_with_allocator(allocator::temp()) @inline;
}
fn String[]! CsvReader.read_new_row_with_allocator(self, Allocator* allocator = mem::heap())
fn String[]! CsvReader.read_new_row_with_allocator(self, Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
@@ -28,7 +28,7 @@ fn String[]! CsvReader.read_new_row_with_allocator(self, Allocator* allocator =
fn String[]! CsvReader.read_temp_row(self)
{
return self.read_new_row_with_allocator(mem::temp()) @inline;
return self.read_new_row_with_allocator(allocator::temp()) @inline;
}
fn void! CsvReader.skip_row(self) @maydiscard

View File

@@ -15,7 +15,7 @@ fault JsonParsingError
INVALID_NUMBER,
}
fn Object*! parse(InStream* s, Allocator* allocator = mem::heap())
fn Object*! parse(InStream* s, Allocator* allocator = allocator::heap())
{
JsonContext context = { .last_string = dstring::new_with_capacity(64, allocator), .stream = s, .allocator = allocator };
defer context.last_string.free();

View File

@@ -7,7 +7,7 @@ struct File (InStream, OutStream)
}
module std::io::file;
import libc;
import libc, std::io::path, std::io::os;
fn File! open(String filename, String mode)
{
@@ -161,14 +161,14 @@ fn char[]! load_buffer(String filename, char[] buffer)
}
fn char[]! load_new(String filename, Allocator* allocator = mem::heap())
fn char[]! load_new(String filename, Allocator* allocator = allocator::heap())
{
File file = open(filename, "rb")!;
defer (void)file.close();
usz len = file.seek(0, END)!;
file.seek(0, SET)!;
char* data = allocator.alloc_checked(len)!;
defer catch allocator.free(data);
char* data = allocator::malloc_try(allocator, len)!;
defer catch allocator::free(allocator, data);
usz read = 0;
while (read < len)
{
@@ -179,7 +179,7 @@ fn char[]! load_new(String filename, Allocator* allocator = mem::heap())
fn char[]! load_temp(String filename)
{
return load_new(filename, mem::temp());
return load_new(filename, allocator::temp());
}
/**

View File

@@ -98,9 +98,9 @@ fn usz! Formatter.print_with_function(&self, Printable* arg)
self.width = old_width;
self.prec = old_prec;
}
@pool()
@stack_mem(1024; Allocator* mem)
{
return self.out_substr(arg.to_new_string(mem::temp()));
return self.out_substr(arg.to_new_string(mem));
};
}
return SearchResult.MISSING?;

View File

@@ -1,4 +1,5 @@
module std::io;
import std::math;
const char[16] XDIGITS_H = "0123456789ABCDEF";
const char[16] XDIGITS_L = "0123456789abcdef";

View File

@@ -49,7 +49,7 @@ fault IoError
* @param stream
* @require @is_instream(stream)
**/
macro String! readline(stream = io::stdin(), Allocator* allocator = mem::heap())
macro String! readline(stream = io::stdin(), Allocator* allocator = allocator::heap())
{
bool $is_stream = @typeid(stream) == InStream*.typeid;
$if $is_stream:
@@ -84,7 +84,7 @@ macro String! readline(stream = io::stdin(), Allocator* allocator = mem::heap())
};
}
macro String! treadline(stream = io::stdin()) => readline(stream, mem::temp()) @inline;
macro String! treadline(stream = io::stdin()) => readline(stream, allocator::temp()) @inline;
/**
* @require @is_outstream(out) "The output must implement OutStream"

View File

@@ -1,5 +1,5 @@
module std::io::os;
import libc;
import std::io::path, libc, std::os;
macro void! native_chdir(Path path)
{

View File

@@ -1,5 +1,5 @@
module std::io::os;
import libc;
import libc, std::os, std::io;
fn void! native_stat(Stat* stat, String path) @if(env::DARWIN || env::LINUX)
{

View File

@@ -1,7 +1,7 @@
module std::io::os;
import libc;
import libc, std::os;
macro String! getcwd(Allocator* allocator = mem::heap())
macro String! getcwd(Allocator* allocator = allocator::heap())
{
$switch
$case env::WIN32:

View File

@@ -1,4 +1,5 @@
module std::io::file::os @if(env::POSIX);
module std::io::os @if(env::POSIX);
import std::io, std::os;
fn PathList! native_ls(Path dir, bool no_dirs, bool no_symlinks, String mask, Allocator* allocator)
{
@@ -21,6 +22,7 @@ fn PathList! native_ls(Path dir, bool no_dirs, bool no_symlinks, String mask, Al
}
module std::io::os @if(env::WIN32);
import std::time, std::os, std::io;
fn PathList! native_ls(Path dir, bool no_dirs, bool no_symlinks, String mask, Allocator* allocator)
{

View File

@@ -1,5 +1,5 @@
module std::io::file::os @if(env::POSIX);
import libc;
module std::io::os @if(env::POSIX);
import std::io, std::os, libc;
/**
* @require dir.str_view()
@@ -33,6 +33,7 @@ fn void! native_rmtree(Path dir)
}
module std::io::os @if(env::WIN32);
import std::io, std::time, std::os;
fn void! native_rmtree(Path path)
{
@@ -46,7 +47,7 @@ fn void! native_rmtree(Path path)
{
@pool()
{
String filename = string::new_from_wstring((WString)&find_data.cFileName, mem::temp())!;
String filename = string::new_from_wstring((WString)&find_data.cFileName, allocator::temp())!;
if (filename == "." || filename == "..") continue;
Path file_path = path.tappend(filename)!;
if (find_data.dwFileAttributes & win32::FILE_ATTRIBUTE_DIRECTORY)

View File

@@ -1,6 +1,7 @@
module std::io::os @if(env::LIBC);
import std::io::path, std::os;
fn Path! native_temp_directory(Allocator* allocator = mem::heap()) @if(!env::WIN32)
fn Path! native_temp_directory(Allocator* allocator = allocator::heap()) @if(!env::WIN32)
{
foreach (String env : { "TMPDIR", "TMP", "TEMP", "TEMPDIR" })
{
@@ -10,13 +11,13 @@ fn Path! native_temp_directory(Allocator* allocator = mem::heap()) @if(!env::WIN
return path::new("/tmp", allocator);
}
fn Path! native_temp_directory(Allocator* allocator = mem::heap()) @if(env::WIN32)
fn Path! native_temp_directory(Allocator* allocator = allocator::heap()) @if(env::WIN32)
{
@pool(allocator)
{
Win32_DWORD len = win32::getTempPathW(0, null);
if (!len) return IoError.GENERAL_ERROR?;
Char16[] buff = mem::temp_array(Char16, len + (usz)1);
Char16[] buff = mem::temp_alloc_array(Char16, len + (usz)1);
if (!win32::getTempPathW(len, buff)) return IoError.GENERAL_ERROR?;
return path::new(string::temp_from_utf16(buff[:len]), allocator);
};
@@ -24,7 +25,7 @@ fn Path! native_temp_directory(Allocator* allocator = mem::heap()) @if(env::WIN3
module std::io::os @if(env::NO_LIBC);
macro Path! native_temp_directory(Allocator* allocator = mem::heap())
macro Path! native_temp_directory(Allocator* allocator = allocator::heap())
{
return IoError.UNSUPPORTED_OPERATION?;
}

View File

@@ -1,5 +1,5 @@
module std::io::path;
import std::collections::list;
import std::collections::list, std::io::os;
const PathEnv DEFAULT_PATH_ENV = env::WIN32 ? PathEnv.WIN32 : PathEnv.POSIX;
const char PREFERRED_SEPARATOR_WIN32 = '\\';
@@ -26,11 +26,11 @@ enum PathEnv
POSIX
}
fn Path! getcwd(Allocator* allocator = mem::heap())
fn Path! getcwd(Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
return new(os::getcwd(mem::temp()), allocator);
return new(os::getcwd(allocator::temp()), allocator);
};
}
@@ -38,9 +38,9 @@ fn bool is_dir(Path path) => os::native_is_dir(path.str_view());
fn bool is_file(Path path) => os::native_is_file(path.str_view());
fn usz! file_size(Path path) => os::native_file_size(path.str_view());
fn bool exists(Path path) => os::native_file_or_dir_exists(path.str_view());
fn Path! tgetcwd() => getcwd(mem::temp()) @inline;
fn Path! tgetcwd() => getcwd(allocator::temp()) @inline;
fn void! chdir(Path path) => os::native_chdir(path) @inline;
fn Path! temp_directory(Allocator* allocator = mem::heap()) => os::native_temp_directory(allocator);
fn Path! temp_directory(Allocator* allocator = allocator::heap()) => os::native_temp_directory(allocator);
fn void! delete(Path path) => os::native_remove(path.str_view()) @inline;
macro bool is_separator(char c, PathEnv path_env = DEFAULT_PATH_ENV)
@@ -58,7 +58,7 @@ macro bool is_win32_separator(char c)
return c == '/' || c == '\\';
}
fn PathList! ls(Path dir, bool no_dirs = false, bool no_symlinks = false, String mask = "", Allocator* allocator = mem::heap())
fn PathList! ls(Path dir, bool no_dirs = false, bool no_symlinks = false, String mask = "", Allocator* allocator = allocator::heap())
{
$if $defined(os::native_ls):
return os::native_ls(dir, no_dirs, no_symlinks, mask, allocator);
@@ -105,17 +105,17 @@ fn void! rmtree(Path path)
$endif
}
fn Path! new(String path, Allocator* allocator = mem::heap(), PathEnv path_env = DEFAULT_PATH_ENV)
fn Path! new(String path, Allocator* allocator = allocator::heap(), PathEnv path_env = DEFAULT_PATH_ENV)
{
return { normalize(path.copy(allocator), path_env), path_env };
}
fn Path! temp_new(String path, PathEnv path_env = DEFAULT_PATH_ENV)
{
return new(path, mem::temp(), path_env);
return new(path, allocator::temp(), path_env);
}
fn Path! new_win32_wstring(WString path, Allocator* allocator = mem::heap())
fn Path! new_win32_wstring(WString path, Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
@@ -123,12 +123,12 @@ fn Path! new_win32_wstring(WString path, Allocator* allocator = mem::heap())
};
}
fn Path! new_windows(String path, Allocator* allocator = mem::heap())
fn Path! new_windows(String path, Allocator* allocator = allocator::heap())
{
return new(path, allocator, WIN32);
}
fn Path! new_posix(String path, Allocator* allocator = mem::heap())
fn Path! new_posix(String path, Allocator* allocator = allocator::heap())
{
return new(path, allocator, POSIX);
}
@@ -143,7 +143,7 @@ fn bool Path.equals(self, Path p2)
*
* @param [in] filename
**/
fn Path! Path.append(self, String filename, Allocator* allocator = mem::heap())
fn Path! Path.append(self, String filename, Allocator* allocator = allocator::heap())
{
if (!self.path_string.len) return new(filename, allocator, self.env)!;
assert(!is_separator(self.path_string[^1], self.env));
@@ -158,7 +158,7 @@ fn Path! Path.append(self, String filename, Allocator* allocator = mem::heap())
};
}
fn Path! Path.tappend(self, String filename) => self.append(filename, mem::temp());
fn Path! Path.tappend(self, String filename) => self.append(filename, allocator::temp());
fn usz Path.start_of_base_name(self) @local
{
@@ -179,13 +179,13 @@ fn bool! Path.is_absolute(self)
return path_start < path_str.len && is_separator(path_str[path_start], self.env);
}
fn Path! Path.absolute(self, Allocator* allocator = mem::heap())
fn Path! Path.absolute(self, Allocator* allocator = allocator::heap())
{
String path_str = self.str_view();
if (!path_str.len) path_str = ".";
if (path_str == ".")
{
String cwd = os::getcwd(mem::temp())!;
String cwd = os::getcwd(allocator::temp())!;
return new(cwd, allocator, self.env);
}
switch (self.env)
@@ -196,7 +196,7 @@ fn Path! Path.absolute(self, Allocator* allocator = mem::heap())
case POSIX:
if (path_str[0] == PREFERRED_SEPARATOR_POSIX) return self;
}
String cwd = os::getcwd(mem::temp())!;
String cwd = os::getcwd(allocator::temp())!;
return Path{ cwd, self.env }.append(path_str, allocator)!;
}
@@ -459,7 +459,7 @@ fn usz! Path.to_format(&self, Formatter* formatter) @dynamic
return formatter.print(self.str_view());
}
fn String Path.to_new_string(&self, Allocator* allocator = mem::heap()) @dynamic
fn String Path.to_new_string(&self, Allocator* allocator = allocator::heap()) @dynamic
{
return self.str_view().copy(allocator);
}

View File

@@ -1,4 +1,5 @@
module std::io;
import std::math;
interface InStream
{
@@ -142,12 +143,12 @@ fn usz! copy_to(InStream* in, OutStream* dst, char[] buffer = {})
$case NORMAL:
@pool()
{
return copy_through_buffer(in, dst, mem::temp_array(char, 4096));
return copy_through_buffer(in, dst, mem::temp_alloc_array(char, 4096));
};
$case SMALL:
@pool()
{
return copy_through_buffer(in, dst, mem::temp_array(char, 1024));
return copy_through_buffer(in, dst, mem::temp_alloc_array(char, 1024));
};
$case TINY:
$case NONE:

View File

@@ -16,7 +16,7 @@ struct ByteBuffer (InStream, OutStream)
* max_read defines how many bytes might be kept before its internal buffer is shrinked.
* @require self.bytes.len == 0 "Buffer already initialized."
**/
fn ByteBuffer*! ByteBuffer.init_new(&self, usz max_read, usz initial_capacity = 16, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn ByteBuffer*! ByteBuffer.init_new(&self, usz max_read, usz initial_capacity = 16, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(max_read, initial_capacity, allocator) @inline;
}
@@ -26,7 +26,7 @@ fn ByteBuffer*! ByteBuffer.init_new(&self, usz max_read, usz initial_capacity =
* max_read defines how many bytes might be kept before its internal buffer is shrinked.
* @require self.bytes.len == 0 "Buffer already initialized."
**/
fn ByteBuffer*! ByteBuffer.new_init(&self, usz max_read, usz initial_capacity = 16, Allocator* allocator = mem::heap())
fn ByteBuffer*! ByteBuffer.new_init(&self, usz max_read, usz initial_capacity = 16, Allocator* allocator = allocator::heap())
{
*self = { .allocator = allocator, .max_read = max_read };
initial_capacity = max(initial_capacity, 16);
@@ -41,7 +41,7 @@ fn ByteBuffer*! ByteBuffer.init_temp(&self, usz max_read, usz initial_capacity =
fn ByteBuffer*! ByteBuffer.temp_init(&self, usz max_read, usz initial_capacity = 16)
{
return self.new_init(max_read, initial_capacity, mem::temp());
return self.new_init(max_read, initial_capacity, allocator::temp());
}
/**
@@ -56,7 +56,7 @@ fn ByteBuffer*! ByteBuffer.init_with_buffer(&self, char[] buf)
fn void ByteBuffer.free(&self)
{
if (self.allocator) self.allocator.free(self.bytes);
if (self.allocator) allocator::free(self.allocator, self.bytes);
*self = {};
}
@@ -146,7 +146,7 @@ fn usz! ByteBuffer.available(&self) @inline @dynamic
fn void! ByteBuffer.grow(&self, usz n)
{
n = math::next_power_of_2(n);
char* p = self.allocator.realloc_aligned(self.bytes, n, .alignment = char.alignof)!;
char* p = allocator::realloc_aligned(self.allocator, self.bytes, n, .alignment = char.alignof)!;
self.bytes = p[:n];
}

View File

@@ -1,4 +1,5 @@
module std::io;
import std::math;
struct ByteWriter (OutStream)
{
@@ -13,7 +14,7 @@ struct ByteWriter (OutStream)
* @require self.bytes.len == 0 "Init may not run on on already initialized data"
* @ensure (bool)allocator, self.index == 0
**/
fn ByteWriter* ByteWriter.new_init(&self, Allocator* allocator = mem::heap())
fn ByteWriter* ByteWriter.new_init(&self, Allocator* allocator = allocator::heap())
{
*self = { .bytes = {}, .allocator = allocator };
return self;
@@ -25,7 +26,7 @@ fn ByteWriter* ByteWriter.new_init(&self, Allocator* allocator = mem::heap())
* @require self.bytes.len == 0 "Init may not run on on already initialized data"
* @ensure (bool)allocator, self.index == 0
**/
fn ByteWriter* ByteWriter.init_new(&self, Allocator* allocator = mem::heap()) @deprecated("Replaced by new_init")
fn ByteWriter* ByteWriter.init_new(&self, Allocator* allocator = allocator::heap()) @deprecated("Replaced by new_init")
{
return self.new_init(allocator) @inline;
}
@@ -37,7 +38,7 @@ fn ByteWriter* ByteWriter.init_new(&self, Allocator* allocator = mem::heap()) @d
**/
fn ByteWriter* ByteWriter.temp_init(&self)
{
return self.new_init(mem::temp()) @inline;
return self.new_init(allocator::temp()) @inline;
}
/**
@@ -59,7 +60,7 @@ fn ByteWriter* ByteWriter.init_with_buffer(&self, char[] data)
fn void! ByteWriter.destroy(&self) @dynamic
{
if (!self.allocator) return;
if (void* ptr = self.bytes.ptr) self.allocator.free(ptr);
if (void* ptr = self.bytes.ptr) allocator::free(self.allocator, ptr);
*self = { };
}
@@ -74,7 +75,7 @@ fn void! ByteWriter.ensure_capacity(&self, usz len) @inline
if (!self.allocator) return IoError.OUT_OF_SPACE?;
if (len < 16) len = 16;
usz new_capacity = math::next_power_of_2(len);
char* new_ptr = self.allocator.realloc_checked(self.bytes.ptr, new_capacity)!;
char* new_ptr = allocator::realloc_try(self.allocator, self.bytes.ptr, new_capacity)!;
self.bytes = new_ptr[:new_capacity];
}

View File

@@ -99,6 +99,8 @@ def Quaternionf = Quaternion(<float>);
def Quaternion = Quaternion(<double>);
def quaternionf_identity = quaternion::identity(<float>);
def quaternion_identity = quaternion::identity(<double>);
def QUATERNION_IDENTITY = quaternion::IDENTITY(<double>);
def QUATERNIONF_IDENTITY = quaternion::IDENTITY(<float>);
def Matrix2f = Matrix2x2(<float>);
def Matrix2 = Matrix2x2(<double>);

View File

@@ -401,13 +401,13 @@ fn Matrix4x4 ortho(Real left, Real right, Real top, Real bottom, Real near, Real
fn Matrix4x4 perspective(Real fov, Real aspect_ratio, Real near, Real far)
{
Real f = (Real)math::tan(math::PI * 0.5 - 0.5 * fov);
Real rangeInv = (Real)1.0 / (near - far);
Real range_inv = (Real)1.0 / (near - far);
return {
f / aspect_ratio, 0, 0, 0,
0, f, 0, 0,
0, 0, (near + far) * rangeInv, -1,
0, 0, near * far * rangeInv * 2, 0,
0, 0, (near + far) * range_inv, near * far * range_inv * 2,
0, 0, -1, 0,
};
}

View File

@@ -10,6 +10,7 @@ union Quaternion
}
macro Quaternion identity() => { 0, 0, 0, 1 };
const Quaternion IDENTITY = { 0, 0, 0, 1 };
macro Quaternion Quaternion.add(Quaternion a, Quaternion b) => Quaternion { .v = a.v + b.v };
macro Quaternion Quaternion.add_each(Quaternion a, Real b) => Quaternion { .v = a.v + b };
macro Quaternion Quaternion.sub(Quaternion a, Quaternion b) => Quaternion { .v = a.v - b.v };
@@ -69,30 +70,33 @@ fn Quaternion Quaternion.mul(a, Quaternion b)
macro into_matrix(Quaternion* q, $Type) @private
{
$Type result = { .m = { [0] = 1, [5] = 1, [10] = 1, [15] = 1 } };
Quaternion norm = q.normalize();
Quaternion q_norm = q.normalize();
var ii = norm.i*norm.i;
var jj = norm.j*norm.j;
var kk = norm.k*norm.k;
var ik = norm.i*norm.k;
var ij = norm.i*q.j;
var jk = norm.j*norm.k;
var li = norm.l*norm.i;
var lj = norm.l*norm.j;
var lk = norm.l*norm.k;
var x = q_norm.i;
var y = q_norm.j;
var z = q_norm.k;
var w = q_norm.l;
result.m00 = 1 - 2*(jj + kk);
result.m01 = 2*(ik + lk);
result.m02 = 2*(ij - lj);
var xx = x + x;
var yy = y + y;
var zz = z + z;
result.m10 = 2*(ij - lk);
result.m11 = 1 - 2*(ii + kk);
result.m12 = 2*(jk + li);
var xxx = xx * x;
var xxy = xx * y;
var xxz = xx * z;
result.m20 = 2*(ik + lj);
result.m21 = 2*(jk - li);
result.m22 = 1 - 2*(ii + jj);
var yyy = yy * y;
var yyz = yy * z;
var zzz = zz * z;
return result;
var yyw = yy * w;
var zzw = zz * w;
var xxw = xx * w;
return $Type {
1 - yyy - zzz, xxy - zzw, xxz + yyw, 0,
xxy + zzw, 1 - xxx - zzz, yyz - xxw, 0,
xxz - yyw, yyz + zzw, 1.0 - xxx - yyy, 0,
0, 0, 0, 1,
};
}

View File

@@ -203,10 +203,10 @@ macro matrix_look_at($Type, eye, target, up) @private
var vy = vz.cross(vx);
return $Type {
vx[0], vy[0], vz[0], 0,
vx[1], vy[1], vz[1], 0,
vx[2], vy[2], vz[2], 0,
- vx.dot(eye), - vy.dot(eye), - vz.dot(eye), 1
vx[0], vx[1], vx[2], - vx.dot(eye),
vy[0], vy[1], vy[2], - vy.dot(eye),
vz[0], vz[1], vz[2], - vz.dot(eye),
0, 0, 0, 1
};
}

View File

@@ -1,5 +1,5 @@
module std::math::random;
import std::hash::fnv32a;
import std::hash::fnv32a, std::time;
const ODD_PHI64 @local = 0x9e3779b97f4a7c15;
const MUL_MCG64 @local = 0xf1357aea2e62a9c5;
@@ -26,7 +26,7 @@ fn void seeder(char[] input, char[] out_buffer)
usz out_chars = out_buffer.len;
@pool()
{
ulong[] words = mem::temp_array(ulong, (out_chars + 7) / 8);
ulong[] words = mem::temp_alloc_array(ulong, (out_chars + 7) / 8);
words[..] = ODD_PHI64;
usz words_len_2 = words.len * 2;
@@ -86,7 +86,7 @@ fn char[8 * 4] entropy()
random_int,
hash(clock::now()),
hash(&DString.new_init),
hash(mem::heap())
hash(allocator::heap())
};
return bitcast(entropy_data, char[8 * 4]);
}

View File

@@ -56,7 +56,7 @@ fn usz! InetAddress.to_format(InetAddress* addr, Formatter* formatter) @dynamic
return formatter.printf("%d.%d.%d.%d", addr.ipv4.a, addr.ipv4.b, addr.ipv4.c, addr.ipv4.d)!;
}
fn String InetAddress.to_new_string(InetAddress* addr, Allocator* allocator = mem::heap()) @dynamic
fn String InetAddress.to_new_string(InetAddress* addr, Allocator* allocator = allocator::heap()) @dynamic
{
if (addr.is_ipv6)
{

View File

@@ -1,4 +1,5 @@
module std::net;
import std::io;
fault NetError
{
@@ -56,7 +57,7 @@ fn uint! ipv4toint(String s)
return out;
}
fn String! int_to_new_ipv4(uint val, Allocator* allocator = mem::heap())
fn String! int_to_new_ipv4(uint val, Allocator* allocator = allocator::heap())
{
char[3 * 4 + 3 + 1] buffer;
String res = (String)io::bprintf(&buffer, "%d.%d.%d.%d", val >> 24, (val >> 16) & 0xFF, (val >> 8) & 0xFF, val & 0xFF)!;
@@ -65,5 +66,5 @@ fn String! int_to_new_ipv4(uint val, Allocator* allocator = mem::heap())
fn String! int_to_temp_ipv4(uint val)
{
return int_to_new_ipv4(val, mem::temp());
return int_to_new_ipv4(val, allocator::temp());
}

View File

@@ -1,5 +1,5 @@
module std::net::os @if(env::POSIX && SUPPORTS_INET);
import libc;
import std::io, libc;
const int F_GETFL = 3;
const int F_SETFL = 4;

View File

@@ -1,6 +1,5 @@
module std::net::os @if(env::WIN32);
import std::os::win32;
import libc;
import std::os, std::io, libc;
const AIFamily PLATFORM_AF_IPX = 6;
const AIFamily PLATFORM_AF_APPLETALK = 16;

View File

@@ -1,6 +1,5 @@
module std::net @if(os::SUPPORTS_INET);
import std::io;
import libc;
import std::io, std::os, std::time, libc;
struct Socket (InStream, OutStream)
{

View File

@@ -1,5 +1,5 @@
module std::net @if(os::SUPPORTS_INET);
import libc;
import std::time, libc, std::os;
macro apply_sockoptions(sockfd, options) @private
{

View File

@@ -1,6 +1,6 @@
module std::net::tcp @if(os::SUPPORTS_INET);
import std::net @public;
import libc;
import std::time, libc;
distinct TcpSocket = inline Socket;
distinct TcpServerSocket = inline Socket;

View File

@@ -1,5 +1,5 @@
module std::os::backtrace;
import std::collections::list;
import std::collections::list, std::os, std::io;
fault BacktraceFault
{
@@ -48,9 +48,9 @@ fn usz! Backtrace.to_format(&self, Formatter* formatter) @dynamic
fn void Backtrace.free(&self)
{
if (!self.allocator) return;
self.allocator.free(self.function);
self.allocator.free(self.object_file);
self.allocator.free(self.file);
allocator::free(self.allocator, self.function);
allocator::free(self.allocator, self.object_file);
allocator::free(self.allocator, self.file);
}
fn Backtrace* Backtrace.init(&self, uptr offset, String function, String object_file, String file = "", uint line = 0, Allocator* allocator)

View File

@@ -2,14 +2,14 @@
// Use of this source code is governed by the MIT license
// a copy of which can be found in the LICENSE_STDLIB file.
module std::os::env;
import libc;
import std::io::path, libc, std::os;
/**
* @param [in] name
* @require name.len > 0
* @return! SearchResult.MISSING
**/
fn String! get_var(String name, Allocator* allocator = mem::heap())
fn String! get_var(String name, Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
@@ -38,7 +38,7 @@ fn String! get_var(String name, Allocator* allocator = mem::heap())
fn String! get_var_temp(String name)
{
return get_var(name, mem::temp());
return get_var(name, allocator::temp());
}
/**
@@ -72,7 +72,7 @@ fn bool set_var(String name, String value, bool overwrite = true)
/**
* Returns the current user's home directory.
**/
fn String! get_home_dir(Allocator* using = mem::heap())
fn String! get_home_dir(Allocator* using = allocator::heap())
{
String home;
$if !env::WIN32:
@@ -86,7 +86,7 @@ fn String! get_home_dir(Allocator* using = mem::heap())
/**
* Returns the current user's config directory.
**/
fn Path! get_config_dir(Allocator* allocator = mem::heap())
fn Path! get_config_dir(Allocator* allocator = allocator::heap())
{
@pool(allocator)
{
@@ -126,7 +126,7 @@ fn bool clear_var(String name)
};
}
fn String! executable_path(Allocator *allocator = mem::heap())
fn String! executable_path(Allocator *allocator = allocator::heap())
{
$if env::DARWIN:
return darwin::executable_path(allocator);

View File

@@ -1,8 +1,5 @@
module std::os::linux @if(env::LINUX);
import libc;
import std::os::posix;
import std::io;
import std::collections::list;
import libc, std::os, std::io, std::collections::list;
extern fn isz readlink(ZString path, char* buf, usz bufsize);
@@ -133,7 +130,7 @@ fn ulong! elf_module_image_base(String path) @local
fn Backtrace! backtrace_load_from_exec(void* addr, Allocator* allocator) @local
{
char[] buf = mem::temp_array(char, 1024);
char[] buf = mem::temp_alloc_array(char, 1024);
String exec_path = process::execute_stdout_to_buffer(buf, {"realpath", "-e", string::tformat("/proc/%d/exe", posix::getpid())})!;
String obj_name = exec_path.copy(allocator);
@@ -143,7 +140,7 @@ fn Backtrace! backtrace_load_from_exec(void* addr, Allocator* allocator) @local
fn Backtrace! backtrace_load_from_dlinfo(void* addr, Linux_Dl_info* info, Allocator* allocator) @local
{
char[] buf = mem::temp_array(char, 1024);
char[] buf = mem::temp_alloc_array(char, 1024);
void* obj_addr = addr - (uptr)info.dli_fbase + (uptr)elf_module_image_base(info.dli_fname.str_view())!;
ZString obj_path = info.dli_fname;
@@ -185,7 +182,7 @@ fn Backtrace! backtrace_from_addr2line(void* addr, String addr2line, String obj_
};
}
fn Backtrace! backtrace_load_element(void* addr, Allocator* allocator = mem::heap()) @local
fn Backtrace! backtrace_load_element(void* addr, Allocator* allocator = allocator::heap()) @local
{
if (!addr) return backtrace::BACKTRACE_UNKNOWN;

View File

@@ -1,5 +1,5 @@
module std::os::darwin @if(env::DARWIN);
import std::collections::list;
import std::collections::list, std::os;
const CTL_UNSPEC = 0; /* unused */
const CTL_KERN = 1; /* "high kernel": proc, limits */
@@ -80,7 +80,7 @@ fn uptr! load_address() @local
{
Darwin_segment_command_64* cmd = darwin::getsegbyname("__TEXT");
if (!cmd) return BacktraceFault.SEGMENT_NOT_FOUND?;
String path = env::executable_path(mem::temp()) ?? BacktraceFault.EXECUTABLE_PATH_NOT_FOUND?!;
String path = env::executable_path(allocator::temp()) ?? BacktraceFault.EXECUTABLE_PATH_NOT_FOUND?!;
uint dyld_count = darwin::_dyld_image_count();
for (uint i = 0; i < dyld_count; i++)
{
@@ -93,7 +93,7 @@ fn uptr! load_address() @local
}
fn Backtrace! backtrace_load_element(String execpath, void* buffer, void* load_address, Allocator* allocator = mem::heap()) @local
fn Backtrace! backtrace_load_element(String execpath, void* buffer, void* load_address, Allocator* allocator = allocator::heap()) @local
{
@pool(allocator)
{
@@ -147,7 +147,7 @@ fn BacktraceList! symbolize_backtrace(void*[] backtrace, Allocator* allocator)
}
@pool(allocator)
{
String execpath = executable_path(mem::temp())!;
String execpath = executable_path(allocator::temp())!;
foreach (addr : backtrace)
{
list.append(backtrace_load_element(execpath, addr, load_addr, allocator) ?? backtrace::BACKTRACE_UNKNOWN);

View File

@@ -26,7 +26,7 @@ macro Class! class_by_name(char* c)
return cls;
}
macro Class[] class_get_list(Allocator *allocator = mem::heap())
macro Class[] class_get_list(Allocator *allocator = allocator::heap())
{
int num_classes = _macos_objc_getClassList(null, 0);
if (!num_classes) return {};

View File

@@ -1,4 +1,5 @@
module std::os::posix @if(env::POSIX);
import std::thread;
import libc;
const PTHREAD_MUTEX_NORMAL = 0;

View File

@@ -1,6 +1,5 @@
module std::os::process @if(env::WIN32 || env::POSIX);
import std::io::file;
import libc;
import std::io, libc, std::os;
// This code is based on https://github.com/sheredom/subprocess.h
@@ -247,7 +246,7 @@ fn SubProcess! create(String[] command_line, SubProcessOptions options = {}, Str
**/
fn ZString* tcopy_command_line(String[] command_line) @local @inline @if(env::POSIX)
{
ZString* copy = mem::temp_array(ZString, command_line.len + 1);
ZString* copy = mem::temp_alloc_array(ZString, command_line.len + 1);
foreach (i, str : command_line)
{
copy[i] = str.zstr_tcopy();
@@ -260,7 +259,7 @@ const ZString[1] EMPTY_ENVIRONMENT @if(env::POSIX) = { null };
fn ZString* tcopy_env(String[] environment) @local @inline @if(env::POSIX)
{
if (!environment.len) return &EMPTY_ENVIRONMENT;
ZString* copy = mem::temp_array(ZString, environment.len + 1);
ZString* copy = mem::temp_alloc_array(ZString, environment.len + 1);
copy[environment.len] = null;
foreach (i, str : environment)
{

View File

@@ -1,4 +1,5 @@
module std::os::win32 @if(env::WIN32);
import std::thread, std::os::backtrace;
const Win32_DWORD STARTF_USESTDHANDLES = 0x00000100;
const Win32_DWORD CREATE_NO_WINDOW = 0x08000000;

View File

@@ -1,6 +1,5 @@
module std::threads::os @if(env::POSIX);
import std::os::posix;
import libc;
module std::thread::os @if(env::POSIX);
import std::os::posix, std::time, libc;
struct NativeMutex
{
@@ -152,8 +151,7 @@ fn void* callback(void* arg) @private
fn void! NativeThread.create(&thread, ThreadFn thread_fn, void* arg)
{
PosixThreadData *thread_data = mem::new(PosixThreadData);
*thread_data = { .thread_fn = thread_fn, .arg = arg };
PosixThreadData *thread_data = mem::new(PosixThreadData, { .thread_fn = thread_fn, .arg = arg });
if (posix::pthread_create(thread, null, &callback, thread_data) != 0)
{
*thread = null;

View File

@@ -1,5 +1,5 @@
module std::thread::os @if(env::WIN32);
import std::os::win32;
import std::os::win32, std::time;
distinct NativeThread = inline Win32_HANDLE;

View File

@@ -1,5 +1,6 @@
module std::thread;
import std::thread::os;
import std::time;
distinct MutexType = int;

View File

@@ -1,4 +1,5 @@
module std::time;
import std::io;
distinct Time = long;
distinct Duration = long;