mirror of
https://github.com/c3lang/c3c.git
synced 2026-02-27 12:01:16 +00:00
Add thread::fence (from $$fence builtin). Ref and RefCounted types.
This commit is contained in:
132
lib/std/core/refcount.c3
Normal file
132
lib/std/core/refcount.c3
Normal file
@@ -0,0 +1,132 @@
|
||||
<*
|
||||
Ref provides a general *external* ref counted wrapper for a pointer. For convenience, a ref count of 0
|
||||
means the reference is still valid.
|
||||
|
||||
When the rc drops to -1, it will first run the dealloc function on the underlying pointer (if it exists),
|
||||
then free the pointer and the atomic variable assuming that they are allocated using the Allocator in the Ref.
|
||||
|
||||
@require !$defined(Type.dealloc) ||| $defined(Type.dealloc(&&(Type){})) : "'dealloc' must only take a pointer to the underlying type"
|
||||
@require !$defined(Type.dealloc) ||| @typeis((Type){}.dealloc(), void) : "'dealloc' must return 'void'"
|
||||
*>
|
||||
module std::core::mem::ref { Type };
|
||||
import std::thread, std::atomic;
|
||||
|
||||
// True when Type requires alignment beyond the allocator's default, in which
// case the *_aligned allocation/free variants must be used (see `new` / `release`).
const OVERALIGNED @private = Type.alignof > mem::DEFAULT_MEM_ALIGNMENT;

// Signature of a user-supplied deallocation hook taking the raw pointer.
// NOTE(review): not referenced in this file — presumably for user code; confirm.
alias DeallocFn = fn void(void*);
|
||||
|
||||
<*
 Wrap an already-allocated pointer in a Ref, allocating a fresh shared
 refcount with `allocator`. Per the module contract a count of 0 means the
 reference is live, so the freshly (presumably zero-) initialized count
 represents one reference.

 @param ptr : "Pointer to wrap; assumed allocated with `allocator` (it is freed with it on release)."
 @param allocator : "Allocator used for the refcount and for the final free."
*>
fn Ref wrap(Type* ptr, Allocator allocator = mem)
{
	return { .refcount = allocator::new(allocator, Atomic{int}), .ptr = ptr, .allocator = allocator };
}
|
||||
<*
 Allocate a new Type and wrap it in a Ref. With no argument the value is
 zero-initialized; with one argument the value is initialized from it.

 @require $vacount < 2 : "Too many arguments."
 @require $vacount == 0 ||| @assignable_to($vaexpr[0], Type) : "The first argument must be an initializer for the type"
*>
macro Ref new(..., Allocator allocator = mem)
{
	// Select the allocation strategy at compile time: aligned variants when
	// Type is overaligned, calloc (zeroing) when no initializer was passed.
	$switch:
		$case OVERALIGNED && !$vacount:
			// Overaligned, no initializer: zeroed aligned allocation.
			Type* ptr = allocator::calloc_aligned(allocator, Type.sizeof, Type.alignof)!!;
		$case OVERALIGNED:
			// Overaligned with initializer: aligned allocation, then copy in.
			Type* ptr = allocator::malloc_aligned(allocator, Type.sizeof, Type.alignof)!!;
			*ptr = $vaexpr[0];
		$case !$vacount:
			// No initializer: calloc's zeroing is the initialization.
			Type* ptr = allocator::calloc(allocator, Type.sizeof);
		$default:
			// Initializer given: plain malloc, then copy in.
			Type* ptr = allocator::malloc(allocator, Type.sizeof);
			*ptr = $vaexpr[0];
	$endswitch
	// Fresh count: 0 counts as one live reference per the module contract.
	return { .refcount = allocator::new(allocator, Atomic{int}),
		.ptr = ptr,
		.allocator = allocator };
}
|
||||
|
||||
// External ref-counting wrapper: the count lives in a separately allocated
// atomic, shared by all copies of this Ref value.
struct Ref
{
	Atomic{int}* refcount;  // Shared count; 0 means one live reference, null after release zeroes this struct.
	Type* ptr;              // The wrapped value; freed with `allocator` when count drops to -1.
	Allocator allocator;    // Allocator assumed to own both `ptr` and `refcount`.
}
|
||||
|
||||
<*
 Increase the reference count by one and return `self` for chaining.

 The relaxed increment is the conventional choice for taking an additional
 reference: no data is published by the increment itself.
*>
fn Ref* Ref.retain(&self)
{
	assert(self.refcount != null, "Reference already released");
	// A negative count means the object is in (or past) destruction.
	assert(self.refcount.load(RELAXED) >= 0, "Retaining zombie");
	self.refcount.add(1, RELAXED);
	return self;
}
|
||||
|
||||
<*
 Drop one reference. When the count drops to -1 this runs `Type.dealloc`
 (if defined) on the wrapped pointer, then frees both the pointer and the
 refcount with the stored allocator, and zeroes this Ref.
*>
fn void Ref.release(&self)
{
	assert(self.refcount != null, "Reference already released");
	assert(self.refcount.load(RELAXED) >= 0, "Overrelease of refcount");
	// The decrement must be RELEASE (not RELAXED): it orders this thread's
	// prior writes to the object before the deallocation performed by
	// whichever thread drops the count to -1. Paired with the ACQUIRE fence
	// below — the standard atomic refcount pattern.
	if (self.refcount.sub(1, RELEASE) == 0)
	{
		thread::fence(ACQUIRE);
		$if $defined(Type.dealloc):
			// User-defined destructor runs before the memory is reclaimed.
			self.ptr.dealloc();
		$endif
		$if OVERALIGNED:
			allocator::free_aligned(self.allocator, self.ptr);
		$else
			allocator::free(self.allocator, self.ptr);
		$endif
		allocator::free(self.allocator, self.refcount);
		// Zero the Ref so a double release trips the null assert above.
		*self = {};
	}
}
|
||||
|
||||
module std::core::mem::rc;
|
||||
import std::thread, std::atomic;
|
||||
|
||||
<*
 A RefCounted struct should be an inline base of a struct.
 If a `dealloc` is defined, then it will be called rather than `free`.

 For convenience, a ref count of 0 is still valid, and the struct is
 only freed when the ref count drops to -1.

 The macros rc::retain and rc::release must be used on the full pointer,
 not on the RefCounted substruct.

 So `Foo* f = ...; RefCounted* rc = f; rc::release(rc);` will not do the right thing.
*>
struct RefCounted
{
	Atomic{int} refcount;  // Embedded count; 0 still means one live reference.
}
|
||||
|
||||
<*
 Increase the reference count of `refcounted` and return it for chaining.
 A null pointer is accepted and returned unchanged.

 @require @assignable_to(refcounted, RefCounted*) : "Expected a ref counted value"
*>
macro retain(refcounted)
{
	if (refcounted)
	{
		// A negative count means the object is in (or past) destruction.
		assert(refcounted.refcount.load(RELAXED) >= 0, "Retaining zombie");
		refcounted.refcount.add(1, RELAXED);
	}
	return refcounted;
}
|
||||
|
||||
<*
 Drop one reference. A null pointer is a no-op. When the count drops to -1
 the value's `dealloc` is called if defined, otherwise the pointer is freed.

 @require @assignable_to(refcounted, RefCounted*) : "Expected a ref counted value"
 @require !$defined(refcounted.dealloc()) ||| @typeis(refcounted.dealloc(), void)
     : "Expected refcounted type to have a valid dealloc"
*>
macro void release(refcounted)
{
	if (!refcounted) return;
	assert(refcounted.refcount.load(RELAXED) >= 0, "Overrelease of refcount");
	// The decrement must be RELEASE (not RELAXED) so this thread's prior
	// writes to the object happen-before the deallocation performed by the
	// thread that drops the count to -1; it pairs with the ACQUIRE fence
	// below — the standard atomic refcount pattern.
	if (refcounted.refcount.sub(1, RELEASE) == 0)
	{
		thread::fence(ACQUIRE);
		$if $defined(refcounted.dealloc):
			refcounted.dealloc();
		$else
			free(refcounted);
		$endif
	}
}
|
||||
@@ -48,6 +48,11 @@ macro void? TimedMutex.lock_timeout(&mutex, ulong ms) => NativeTimedMutex.lock_t
|
||||
macro bool TimedMutex.try_lock(&mutex) => NativeTimedMutex.try_lock((NativeTimedMutex*)mutex);
|
||||
macro void? TimedMutex.unlock(&mutex) => NativeTimedMutex.unlock((NativeTimedMutex*)mutex);
|
||||
|
||||
<*
 Emit an atomic thread fence with the given compile-time constant ordering,
 lowering directly to the `$$fence` builtin.
*>
macro void fence(AtomicOrdering $ordering) @safemacro
{
	$$fence($ordering.ordinal);
}
|
||||
|
||||
macro void Mutex.@in_lock(&mutex; @body)
|
||||
{
|
||||
(void)mutex.lock();
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
- Add komihash, a5hash, metrohash64, metrohash128, and wyhash2 variants with tests/benchmark. #2293
|
||||
- '$assignable' is deprecated.
|
||||
- Deprecate allocator::heap() and allocator::temp()
|
||||
- Add `thread::fence` providing a thread fence.
|
||||
|
||||
### Fixes
|
||||
- mkdir/rmdir would not work properly with substring paths on non-windows platforms.
|
||||
@@ -83,6 +84,7 @@
|
||||
- Added `@addr` macro.
|
||||
- Add `ConditionVariable.wait_until` and `ConditionVariable.wait_for`
|
||||
- Added readline_to_stream that takes a stream.
|
||||
- Added `Ref` and `RefCounted` experimental functionality.
|
||||
|
||||
## 0.7.3 Change list
|
||||
|
||||
|
||||
@@ -698,6 +698,7 @@ typedef struct
|
||||
bool print_stats;
|
||||
bool old_slice_copy;
|
||||
bool old_enums;
|
||||
bool single_threaded;
|
||||
int build_threads;
|
||||
TrustLevel trust_level;
|
||||
OptimizationSetting optsetting;
|
||||
|
||||
@@ -451,6 +451,7 @@ typedef enum
|
||||
BUILTIN_EXP2,
|
||||
BUILTIN_EXPECT,
|
||||
BUILTIN_EXPECT_WITH_PROBABILITY,
|
||||
BUILTIN_FENCE,
|
||||
BUILTIN_FLOOR,
|
||||
BUILTIN_FMA,
|
||||
BUILTIN_FMULADD,
|
||||
|
||||
@@ -157,6 +157,12 @@ INLINE void llvm_emit_atomic_store(GenContext *c, BEValue *result_value, Expr *e
|
||||
}
|
||||
}
|
||||
|
||||
// Lower the $$fence builtin to an LLVM fence instruction. The single argument
// has been validated by sema (see the BUILTIN_FENCE case in
// sema_expr_analyse_builtin_call) to be a compile-time constant with a valid
// atomicity, so its integer value is read directly from the const expr. The
// single_threaded build flag is forwarded as LLVMBuildFence's singleThread
// argument, restricting the fence's synchronization scope in that mode.
INLINE void llvm_emit_fence(GenContext *c, BEValue *result_value, Expr *expr)
{
	LLVMValueRef value = LLVMBuildFence(c->builder, llvm_atomic_ordering(expr->call_expr.arguments[0]->const_expr.ixx.i.low), compiler.build.single_threaded, "");
	// $$fence produces no value; the builtin call evaluates to void.
	llvm_value_set(result_value, value, type_void);
}
|
||||
|
||||
INLINE void llvm_emit_unaligned_store(GenContext *c, BEValue *result_value, Expr *expr)
|
||||
{
|
||||
bool emit_check = c->emitting_load_store_check;
|
||||
@@ -773,6 +779,9 @@ void llvm_emit_builtin_call(GenContext *c, BEValue *result_value, Expr *expr)
|
||||
case BUILTIN_VOLATILE_LOAD:
|
||||
llvm_emit_volatile_load(c, result_value, expr);
|
||||
return;
|
||||
case BUILTIN_FENCE:
|
||||
llvm_emit_fence(c, result_value, expr);
|
||||
return;
|
||||
case BUILTIN_ATOMIC_STORE:
|
||||
llvm_emit_atomic_store(c, result_value, expr);
|
||||
return;
|
||||
|
||||
@@ -1006,6 +1006,22 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr)
|
||||
if (!sema_check_builtin_args_match(context, args, 3)) return false;
|
||||
rtype = args[0]->type;
|
||||
break;
|
||||
case BUILTIN_FENCE:
|
||||
ASSERT(arg_count == 1);
|
||||
if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTEGER}, 1)) return false;
|
||||
if (!sema_cast_const(args[0])) RETURN_SEMA_ERROR(args[0], "Ordering must be a compile time constant.");
|
||||
if (!is_valid_atomicity(context, args[0])) return false;
|
||||
switch (args[0]->const_expr.ixx.i.low)
|
||||
{
|
||||
case ATOMIC_NONE:
|
||||
case ATOMIC_RELAXED:
|
||||
case ATOMIC_UNORDERED:
|
||||
RETURN_SEMA_ERROR(args[0], "'none', 'relaxed' and 'unordered' are not valid for fence.");
|
||||
default:
|
||||
break;
|
||||
}
|
||||
rtype = type_void;
|
||||
break;
|
||||
case BUILTIN_ATOMIC_LOAD:
|
||||
{
|
||||
ASSERT(arg_count == 3);
|
||||
@@ -1228,6 +1244,7 @@ static inline int builtin_expected_args(BuiltinFunction func)
|
||||
case BUILTIN_EXACT_NEG:
|
||||
case BUILTIN_EXP2:
|
||||
case BUILTIN_EXP:
|
||||
case BUILTIN_FENCE:
|
||||
case BUILTIN_FLOOR:
|
||||
case BUILTIN_FRAMEADDRESS:
|
||||
case BUILTIN_LLRINT:
|
||||
|
||||
@@ -224,6 +224,7 @@ void symtab_init(uint32_t capacity)
|
||||
builtin_list[BUILTIN_EXP2] = KW_DEF("exp2");
|
||||
builtin_list[BUILTIN_EXPECT] = KW_DEF("expect");
|
||||
builtin_list[BUILTIN_EXPECT_WITH_PROBABILITY] = KW_DEF("expect_with_probability");
|
||||
builtin_list[BUILTIN_FENCE] = KW_DEF("fence");
|
||||
builtin_list[BUILTIN_FLOOR] = KW_DEF("floor");
|
||||
builtin_list[BUILTIN_FMA] = KW_DEF("fma");
|
||||
builtin_list[BUILTIN_FMULADD] = KW_DEF("fmuladd");
|
||||
|
||||
Reference in New Issue
Block a user