Added $$atomic_store and $$atomic_load.

This commit is contained in:
Christoffer Lerno
2023-02-17 13:22:12 +01:00
parent a0a5c940f1
commit baa2e474b5
12 changed files with 190 additions and 18 deletions

View File

@@ -18,12 +18,42 @@ enum AtomicOrdering : int
NOT_ATOMIC, // Not atomic
UNORDERED, // No lock
MONOTONIC, // Consistent ordering
AQUIRE, // Barrier locking load/store
ACQUIRE, // Barrier locking load/store
RELEASE, // Barrier releasing load/store
ACQUIRE_RELEASE, // Barrier fence to load/store
SEQ_CONSISTENT, // Acquire semantics, ordered with other seq_consistent
}
/**
 * Atomically load a value. Thin wrapper over the $$atomic_load builtin.
 *
 * @param [in] x "the variable or dereferenced pointer to load."
 * @param $ordering "atomic ordering of the load, defaults to SEQ_CONSISTENT"
 * @param $volatile "whether the load should be volatile, defaults to 'false'"
 * @return "returns the value of x"
 *
 * @require $ordering != AtomicOrdering.RELEASE "Release ordering is not valid for load."
 * @require $ordering != AtomicOrdering.ACQUIRE_RELEASE "Acquire release is not valid for load."
 * @require types::may_load_atomic($typeof(x)) "Only integer, float and pointers may be used."
 **/
macro @atomic_load(&x, AtomicOrdering $ordering = SEQ_CONSISTENT, $volatile = false) @builtin
{
// The builtin takes (pointer, volatile flag, ordering as int) — the enum is
// converted to its integer value at compile time.
return $$atomic_load(&x, $volatile, (int)$ordering);
}
/**
 * Atomically store a value. Thin wrapper over the $$atomic_store builtin.
 *
 * @param [out] x "the variable or dereferenced pointer to store to."
 * @param value "the value to store."
 * @param $ordering "the atomic ordering of the store, defaults to SEQ_CONSISTENT"
 * @param $volatile "whether the store should be volatile, defaults to 'false'"
 *
 * @require $ordering != AtomicOrdering.ACQUIRE "Acquire ordering is not valid for store."
 * @require $ordering != AtomicOrdering.ACQUIRE_RELEASE "Acquire release is not valid for store."
 * @require types::may_load_atomic($typeof(x)) "Only integer, float and pointers may be used."
 **/
macro void @atomic_store(&x, value, AtomicOrdering $ordering = SEQ_CONSISTENT, $volatile = false) @builtin
{
// Argument order mirrors the builtin: (pointer, value, volatile flag, ordering as int).
$$atomic_store(&x, value, $volatile, (int)$ordering);
}
macro compare_exchange(ptr, compare, value, AtomicOrdering $success, AtomicOrdering $failure, bool $volatile = true, bool $weak = false, usz $alignment = 0)
{
return $$compare_exchange(ptr, compare, value, $volatile, $weak, $success.ordinal, $failure.ordinal, $alignment);

View File

@@ -183,6 +183,21 @@ macro bool @has_same(#a, #b, ...)
return true;
}
/**
 * Compile-time check: can values of $Type be used with atomic load/store?
 * True for signed/unsigned integers, pointers and floats; a distinct type
 * is checked recursively against its underlying (inner) type. Everything
 * else (structs, arrays, etc.) returns false.
 **/
macro bool may_load_atomic($Type)
{
$switch ($Type.kindof):
$case SIGNED_INT:
$case UNSIGNED_INT:
$case POINTER:
$case FLOAT:
return true;
$case DISTINCT:
// Unwrap the distinct type and test what it is based on.
return may_load_atomic($Type.inner);
$default:
return false;
$endswitch;
}
// True if $Type is already float-like or is an integer (and so can be promoted to a float-like type).
macro bool is_promotable_to_floatlike($Type) => types::is_floatlike($Type) || types::is_int($Type);

View File

@@ -826,6 +826,8 @@ typedef enum
{
BUILTIN_ABS,
BUILTIN_ATOMIC_LOAD,
BUILTIN_ATOMIC_STORE,
BUILTIN_BITREVERSE,
BUILTIN_BSWAP,
BUILTIN_CEIL,

View File

@@ -157,6 +157,33 @@ INLINE void llvm_emit_volatile_load(GenContext *c, BEValue *result_value, Expr *
LLVMSetVolatile(result_value->value, true);
}
// Codegen for $$atomic_store(ptr, value, $volatile, $ordering).
// arguments[0] = destination pointer, arguments[1] = value to store,
// arguments[2] = compile-time bool 'volatile', arguments[3] = ordering constant.
INLINE void llvm_emit_atomic_store(GenContext *c, BEValue *result_value, Expr *expr)
{
BEValue value;
llvm_emit_expr(c, &value, expr->call_expr.arguments[0]);
llvm_emit_expr(c, result_value, expr->call_expr.arguments[1]);
// Load the pointer argument as an rvalue, then reinterpret that pointer
// value as the address to store through.
llvm_value_rvalue(c, &value);
value.kind = BE_ADDRESS;
BEValue store_value = *result_value;
LLVMValueRef store = llvm_store(c, &value, &store_value);
// llvm_store may yield no instruction; only then can volatility/ordering be set.
if (store)
{
if (expr->call_expr.arguments[2]->const_expr.b) LLVMSetVolatile(store, true);
LLVMSetOrdering(store, llvm_atomic_ordering(expr->call_expr.arguments[3]->const_expr.ixx.i.low));
}
}
// Codegen for $$atomic_load(ptr, $volatile, $ordering).
// arguments[0] = source pointer, arguments[1] = compile-time bool 'volatile',
// arguments[2] = ordering constant.
INLINE void llvm_emit_atomic_load(GenContext *c, BEValue *result_value, Expr *expr)
{
llvm_emit_expr(c, result_value, expr->call_expr.arguments[0]);
// First rvalue pass materializes the pointer; it is then re-tagged as an
// address of the pointee type so the second pass emits the actual load.
llvm_value_rvalue(c, result_value);
result_value->kind = BE_ADDRESS;
result_value->type = type_lowering(result_value->type->pointer);
llvm_value_rvalue(c, result_value);
// Mark the emitted load instruction with volatility and atomic ordering.
if (expr->call_expr.arguments[1]->const_expr.b) LLVMSetVolatile(result_value->value, true);
LLVMSetOrdering(result_value->value, llvm_atomic_ordering(expr->call_expr.arguments[2]->const_expr.ixx.i.low));
}
static inline LLVMValueRef llvm_syscall_asm(GenContext *c, LLVMTypeRef func_type, char *call)
{
return LLVMGetInlineAsm(func_type, call, strlen(call),
@@ -634,6 +661,12 @@ void llvm_emit_builtin_call(GenContext *c, BEValue *result_value, Expr *expr)
case BUILTIN_VOLATILE_LOAD:
llvm_emit_volatile_load(c, result_value, expr);
return;
case BUILTIN_ATOMIC_STORE:
llvm_emit_atomic_store(c, result_value, expr);
return;
case BUILTIN_ATOMIC_LOAD:
llvm_emit_atomic_load(c, result_value, expr);
return;
case BUILTIN_SYSCALL:
llvm_emit_syscall(c, result_value, expr);
return;

View File

@@ -5,7 +5,6 @@
#include "llvm_codegen_internal.h"
#include <math.h>
static LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity);
static LLVMValueRef llvm_emit_coerce_alignment(GenContext *c, BEValue *be_value, LLVMTypeRef coerce_type, AlignSize target_alignment, AlignSize *resulting_alignment);
static bool bitstruct_requires_bitswap(Decl *decl);
static inline LLVMValueRef llvm_const_high_bitmask(GenContext *c, LLVMTypeRef type, int type_bits, int high_bits);
@@ -4894,20 +4893,6 @@ static void llvm_emit_splatted_variadic_arg(GenContext *c, Expr *expr, Type *var
}
// Map the frontend Atomicity enum one-to-one onto LLVM's atomic ordering levels.
static LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity)
{
switch (atomicity)
{
case ATOMIC_NONE: return LLVMAtomicOrderingNotAtomic;
case ATOMIC_UNORDERED: return LLVMAtomicOrderingUnordered;
case ATOMIC_RELAXED: return LLVMAtomicOrderingMonotonic;
case ATOMIC_ACQUIRE: return LLVMAtomicOrderingAcquire;
case ATOMIC_RELEASE: return LLVMAtomicOrderingRelease;
case ATOMIC_ACQUIRE_RELEASE: return LLVMAtomicOrderingAcquireRelease;
case ATOMIC_SEQ_CONSISTENT: return LLVMAtomicOrderingSequentiallyConsistent;
}
// Exhaustive switch above — reaching here means a corrupt enum value.
UNREACHABLE
}

View File

@@ -52,3 +52,18 @@ LLVMValueRef llvm_emit_shl_fixed(GenContext *c, LLVMValueRef data, int shift)
if (shift >= bit_width) return llvm_get_zero_raw(type);
return llvm_emit_shl(c, data, LLVMConstInt(type, (unsigned)shift, false));
}
// Map the frontend Atomicity enum one-to-one onto LLVM's atomic ordering levels.
// Exposed (non-static) so the atomic load/store builtins can share it.
LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity)
{
switch (atomicity)
{
case ATOMIC_NONE: return LLVMAtomicOrderingNotAtomic;
case ATOMIC_UNORDERED: return LLVMAtomicOrderingUnordered;
case ATOMIC_RELAXED: return LLVMAtomicOrderingMonotonic;
case ATOMIC_ACQUIRE: return LLVMAtomicOrderingAcquire;
case ATOMIC_RELEASE: return LLVMAtomicOrderingRelease;
case ATOMIC_ACQUIRE_RELEASE: return LLVMAtomicOrderingAcquireRelease;
case ATOMIC_SEQ_CONSISTENT: return LLVMAtomicOrderingSequentiallyConsistent;
}
// Exhaustive switch above — reaching here means a corrupt enum value.
UNREACHABLE
}

View File

@@ -476,6 +476,7 @@ void llvm_emit_debug_local_var(GenContext *c, Decl *var);
void llvm_emit_debug_global_var(GenContext *c, Decl *global);
#define EMIT_LOC(c, x) do { if (c->debug.builder) llvm_emit_debug_location(c, x->span); } while (0);
LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity);
// Implementations

View File

@@ -636,6 +636,36 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr)
if (!sema_check_builtin_args_match(args, arg_count)) return false;
rtype = args[0]->type;
break;
case BUILTIN_ATOMIC_LOAD:
{
if (!sema_check_builtin_args(args, (BuiltinArg[]){ BA_POINTER, BA_BOOL, BA_INTEGER }, 3)) return false;
Type *original = type_flatten(args[0]->type);
if (original == type_voidptr)
{
SEMA_ERROR(args[0], "Expected a typed pointer.");
return false;
}
if (!expr_is_const(args[1]))
{
SEMA_ERROR(args[1], "'is_volatile' must be a compile time constant.");
return false;
}
if (!expr_is_const(args[2]))
{
SEMA_ERROR(args[2], "Ordering must be a compile time constant.");
return false;
}
if (!is_valid_atomicity(args[2])) return false;
switch (expr->const_expr.ixx.i.low)
{
case ATOMIC_ACQUIRE_RELEASE:
case ATOMIC_RELEASE:
SEMA_ERROR(args[2], "'release' and 'acquire release' are not valid for atomic loads.");
return false;
}
rtype = original->pointer;
break;
}
case BUILTIN_VOLATILE_LOAD:
{
if (!sema_check_builtin_args(args, (BuiltinArg[]) { BA_POINTER }, 1)) return false;
@@ -659,6 +689,36 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr)
rtype = args[1]->type;
break;
}
case BUILTIN_ATOMIC_STORE:
{
if (!sema_check_builtin_args(args, (BuiltinArg[]) { BA_POINTER }, 1)) return false;
if (!sema_check_builtin_args(&args[2], (BuiltinArg[]) { BA_BOOL, BA_INTEGER }, 2)) return false;
Type *original = type_flatten(args[0]->type);
if (original != type_voidptr)
{
if (!cast_implicit(context, args[1], original->pointer)) return false;
}
if (!expr_is_const(args[2]))
{
SEMA_ERROR(args[2], "'is_volatile' must be a compile time constant.");
return false;
}
if (!expr_is_const(args[3]))
{
SEMA_ERROR(args[3], "Ordering must be a compile time constant.");
return false;
}
if (!is_valid_atomicity(args[3])) return false;
switch (expr->const_expr.ixx.i.low)
{
case ATOMIC_ACQUIRE_RELEASE:
case ATOMIC_ACQUIRE:
SEMA_ERROR(args[2], "'acquire' and 'acquire release' are not valid for atomic stores.");
return false;
}
rtype = args[1]->type;
break;
}
case BUILTIN_NONE:
case BUILTIN_COMPARE_EXCHANGE:
case BUILTIN_FRAMEADDRESS:
@@ -764,7 +824,10 @@ static inline int builtin_expected_args(BuiltinFunction func)
case BUILTIN_OVERFLOW_MUL:
case BUILTIN_OVERFLOW_SUB:
case BUILTIN_PREFETCH:
case BUILTIN_ATOMIC_LOAD:
return 3;
case BUILTIN_ATOMIC_STORE:
return 4;
case BUILTIN_MEMCOPY:
case BUILTIN_MEMCOPY_INLINE:
case BUILTIN_MEMMOVE:

View File

@@ -187,6 +187,8 @@ void symtab_init(uint32_t capacity)
type_property_list[TYPE_PROPERTY_VALUES] = KW_DEF("values");
builtin_list[BUILTIN_ABS] = KW_DEF("abs");
builtin_list[BUILTIN_ATOMIC_LOAD] = KW_DEF("atomic_load");
builtin_list[BUILTIN_ATOMIC_STORE] = KW_DEF("atomic_store");
builtin_list[BUILTIN_BITREVERSE] = KW_DEF("bitreverse");
builtin_list[BUILTIN_BSWAP] = KW_DEF("bswap");
builtin_list[BUILTIN_CEIL] = KW_DEF("ceil");

View File

@@ -1 +1 @@
#define COMPILER_VERSION "0.4.69"
#define COMPILER_VERSION "0.4.70"

View File

@@ -0,0 +1,26 @@
// #target: macos-x64
module test;
import std::io;
// NOTE(review): not referenced by main() below — presumably scaffolding for
// the test fixture; confirm whether it can be removed.
struct Ghh
{
int a;
int b;
int c;
}
// Exercises @atomic_load/@atomic_store with both the default
// (SEQ_CONSISTENT, non-volatile) and explicit (MONOTONIC, volatile)
// parameters; the #expect block checks the emitted LLVM IR.
fn void main()
{
int a = 111;
int x = @atomic_load(a);
int y = @atomic_load(a, MONOTONIC, true);
@atomic_store(a, 123 + x);
@atomic_store(a, 33 + y, MONOTONIC, true);
io::printfn("%d", a);
}
/* #expect: test.ll
%0 = load atomic i32, ptr %a seq_cst, align 4
%1 = load atomic volatile i32, ptr %a monotonic, align 4
store atomic i32 %3, ptr %a seq_cst, align 4
store atomic volatile i32 %5, ptr %a monotonic, align 4

View File

@@ -288,7 +288,7 @@ after_check51: ; preds = %if.then
panic_block: ; preds = %assign_optional
%85 = load ptr, ptr @std_core_builtin_panic, align 8
call void %85(ptr @.panic_msg, i64 27, ptr @.file, i64 6, ptr @.func, i64 4, i32 280)
call void %85(ptr @.panic_msg, i64 27, ptr @.file, i64 6, ptr @.func
unreachable
noerr_block: ; preds = %after_check51