From baa2e474b5e7daf31504afbb09e075f904da9ff7 Mon Sep 17 00:00:00 2001 From: Christoffer Lerno Date: Fri, 17 Feb 2023 13:22:12 +0100 Subject: [PATCH] Added $$atomic_store and $$atomic_load. --- lib/std/core/mem.c3 | 32 +++++++++- lib/std/core/types.c3 | 15 +++++ src/compiler/enums.h | 2 + src/compiler/llvm_codegen_builtins.c | 33 ++++++++++ src/compiler/llvm_codegen_expr.c | 15 ----- src/compiler/llvm_codegen_instr.c | 15 +++++ src/compiler/llvm_codegen_internal.h | 1 + src/compiler/sema_builtins.c | 63 +++++++++++++++++++ src/compiler/symtab.c | 2 + src/version.h | 2 +- .../concurrency/atomic_load_store.c3t | 26 ++++++++ test/test_suite/stdlib/map.c3t | 2 +- 12 files changed, 190 insertions(+), 18 deletions(-) create mode 100644 test/test_suite/concurrency/atomic_load_store.c3t diff --git a/lib/std/core/mem.c3 b/lib/std/core/mem.c3 index ac02a4721..cab206cbb 100644 --- a/lib/std/core/mem.c3 +++ b/lib/std/core/mem.c3 @@ -18,12 +18,42 @@ enum AtomicOrdering : int NOT_ATOMIC, // Not atomic UNORDERED, // No lock MONOTONIC, // Consistent ordering - AQUIRE, // Barrier locking load/store + ACQUIRE, // Barrier locking load/store RELEASE, // Barrier releasing load/store ACQUIRE_RELEASE, // Barrier fence to load/store SEQ_CONSISTENT, // Acquire semantics, ordered with other seq_consistent } +/** + * @param [in] x "the variable or dereferenced pointer to load." + * @param $ordering "atomic ordering of the load, defaults to SEQ_CONSISTENT" + * @param $volatile "whether the load should be volatile, defaults to 'false'" + * @return "returns the value of x" + * + * @require $ordering != AtomicOrdering.RELEASE "Release ordering is not valid for load." + * @require $ordering != AtomicOrdering.ACQUIRE_RELEASE "Acquire release is not valid for load." + * @require types::may_load_atomic($typeof(x)) "Only integer, float and pointers may be used." 
+ **/ +macro @atomic_load(&x, AtomicOrdering $ordering = SEQ_CONSISTENT, $volatile = false) @builtin +{ + return $$atomic_load(&x, $volatile, (int)$ordering); +} + +/** + * @param [out] x "the variable or dereferenced pointer to store to." + * @param value "the value to store." + * @param $ordering "the atomic ordering of the store, defaults to SEQ_CONSISTENT" + * @param $volatile "whether the store should be volatile, defaults to 'false'" + * + * @require $ordering != AtomicOrdering.ACQUIRE "Acquire ordering is not valid for store." + * @require $ordering != AtomicOrdering.ACQUIRE_RELEASE "Acquire release is not valid for store." + * @require types::may_load_atomic($typeof(x)) "Only integer, float and pointers may be used." + **/ +macro void @atomic_store(&x, value, AtomicOrdering $ordering = SEQ_CONSISTENT, $volatile = false) @builtin +{ + $$atomic_store(&x, value, $volatile, (int)$ordering); +} + macro compare_exchange(ptr, compare, value, AtomicOrdering $success, AtomicOrdering $failure, bool $volatile = true, bool $weak = false, usz $alignment = 0) { return $$compare_exchange(ptr, compare, value, $volatile, $weak, $success.ordinal, $failure.ordinal, $alignment); diff --git a/lib/std/core/types.c3 b/lib/std/core/types.c3 index 682c80a11..e365e4686 100644 --- a/lib/std/core/types.c3 +++ b/lib/std/core/types.c3 @@ -183,6 +183,21 @@ macro bool @has_same(#a, #b, ...) 
return true; } +macro bool may_load_atomic($Type) +{ + $switch ($Type.kindof): + $case SIGNED_INT: + $case UNSIGNED_INT: + $case POINTER: + $case FLOAT: + return true; + $case DISTINCT: + return may_load_atomic($Type.inner); + $default: + return false; + $endswitch; +} + macro bool is_promotable_to_floatlike($Type) => types::is_floatlike($Type) || types::is_int($Type); diff --git a/src/compiler/enums.h b/src/compiler/enums.h index c602ef44f..995003843 100644 --- a/src/compiler/enums.h +++ b/src/compiler/enums.h @@ -826,6 +826,8 @@ typedef enum { BUILTIN_ABS, + BUILTIN_ATOMIC_LOAD, + BUILTIN_ATOMIC_STORE, BUILTIN_BITREVERSE, BUILTIN_BSWAP, BUILTIN_CEIL, diff --git a/src/compiler/llvm_codegen_builtins.c b/src/compiler/llvm_codegen_builtins.c index 2f7955130..65f8fb428 100644 --- a/src/compiler/llvm_codegen_builtins.c +++ b/src/compiler/llvm_codegen_builtins.c @@ -157,6 +157,33 @@ INLINE void llvm_emit_volatile_load(GenContext *c, BEValue *result_value, Expr * LLVMSetVolatile(result_value->value, true); } +INLINE void llvm_emit_atomic_store(GenContext *c, BEValue *result_value, Expr *expr) +{ + BEValue value; + llvm_emit_expr(c, &value, expr->call_expr.arguments[0]); + llvm_emit_expr(c, result_value, expr->call_expr.arguments[1]); + llvm_value_rvalue(c, &value); + value.kind = BE_ADDRESS; + BEValue store_value = *result_value; + LLVMValueRef store = llvm_store(c, &value, &store_value); + if (store) + { + if (expr->call_expr.arguments[2]->const_expr.b) LLVMSetVolatile(store, true); + LLVMSetOrdering(store, llvm_atomic_ordering(expr->call_expr.arguments[3]->const_expr.ixx.i.low)); + } +} + +INLINE void llvm_emit_atomic_load(GenContext *c, BEValue *result_value, Expr *expr) +{ + llvm_emit_expr(c, result_value, expr->call_expr.arguments[0]); + llvm_value_rvalue(c, result_value); + result_value->kind = BE_ADDRESS; + result_value->type = type_lowering(result_value->type->pointer); + llvm_value_rvalue(c, result_value); + if (expr->call_expr.arguments[1]->const_expr.b) 
LLVMSetVolatile(result_value->value, true); + LLVMSetOrdering(result_value->value, llvm_atomic_ordering(expr->call_expr.arguments[2]->const_expr.ixx.i.low)); +} + static inline LLVMValueRef llvm_syscall_asm(GenContext *c, LLVMTypeRef func_type, char *call) { return LLVMGetInlineAsm(func_type, call, strlen(call), @@ -634,6 +661,12 @@ void llvm_emit_builtin_call(GenContext *c, BEValue *result_value, Expr *expr) case BUILTIN_VOLATILE_LOAD: llvm_emit_volatile_load(c, result_value, expr); return; + case BUILTIN_ATOMIC_STORE: + llvm_emit_atomic_store(c, result_value, expr); + return; + case BUILTIN_ATOMIC_LOAD: + llvm_emit_atomic_load(c, result_value, expr); + return; case BUILTIN_SYSCALL: llvm_emit_syscall(c, result_value, expr); return; diff --git a/src/compiler/llvm_codegen_expr.c b/src/compiler/llvm_codegen_expr.c index 0fa6e95e0..06839e960 100644 --- a/src/compiler/llvm_codegen_expr.c +++ b/src/compiler/llvm_codegen_expr.c @@ -5,7 +5,6 @@ #include "llvm_codegen_internal.h" #include -static LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity); static LLVMValueRef llvm_emit_coerce_alignment(GenContext *c, BEValue *be_value, LLVMTypeRef coerce_type, AlignSize target_alignment, AlignSize *resulting_alignment); static bool bitstruct_requires_bitswap(Decl *decl); static inline LLVMValueRef llvm_const_high_bitmask(GenContext *c, LLVMTypeRef type, int type_bits, int high_bits); @@ -4894,20 +4893,6 @@ static void llvm_emit_splatted_variadic_arg(GenContext *c, Expr *expr, Type *var } -static LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity) -{ - switch (atomicity) - { - case ATOMIC_NONE: return LLVMAtomicOrderingNotAtomic; - case ATOMIC_UNORDERED: return LLVMAtomicOrderingUnordered; - case ATOMIC_RELAXED: return LLVMAtomicOrderingMonotonic; - case ATOMIC_ACQUIRE: return LLVMAtomicOrderingAcquire; - case ATOMIC_RELEASE: return LLVMAtomicOrderingRelease; - case ATOMIC_ACQUIRE_RELEASE: return LLVMAtomicOrderingAcquireRelease; - case ATOMIC_SEQ_CONSISTENT: 
return LLVMAtomicOrderingSequentiallyConsistent; - } - UNREACHABLE -} diff --git a/src/compiler/llvm_codegen_instr.c b/src/compiler/llvm_codegen_instr.c index b0d590252..c3de5d8c6 100644 --- a/src/compiler/llvm_codegen_instr.c +++ b/src/compiler/llvm_codegen_instr.c @@ -52,3 +52,18 @@ LLVMValueRef llvm_emit_shl_fixed(GenContext *c, LLVMValueRef data, int shift) if (shift >= bit_width) return llvm_get_zero_raw(type); return llvm_emit_shl(c, data, LLVMConstInt(type, (unsigned)shift, false)); } + +LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity) +{ + switch (atomicity) + { + case ATOMIC_NONE: return LLVMAtomicOrderingNotAtomic; + case ATOMIC_UNORDERED: return LLVMAtomicOrderingUnordered; + case ATOMIC_RELAXED: return LLVMAtomicOrderingMonotonic; + case ATOMIC_ACQUIRE: return LLVMAtomicOrderingAcquire; + case ATOMIC_RELEASE: return LLVMAtomicOrderingRelease; + case ATOMIC_ACQUIRE_RELEASE: return LLVMAtomicOrderingAcquireRelease; + case ATOMIC_SEQ_CONSISTENT: return LLVMAtomicOrderingSequentiallyConsistent; + } + UNREACHABLE +} diff --git a/src/compiler/llvm_codegen_internal.h b/src/compiler/llvm_codegen_internal.h index 53edee3b4..b2981b877 100644 --- a/src/compiler/llvm_codegen_internal.h +++ b/src/compiler/llvm_codegen_internal.h @@ -476,6 +476,7 @@ void llvm_emit_debug_local_var(GenContext *c, Decl *var); void llvm_emit_debug_global_var(GenContext *c, Decl *global); #define EMIT_LOC(c, x) do { if (c->debug.builder) llvm_emit_debug_location(c, x->span); } while (0); +LLVMAtomicOrdering llvm_atomic_ordering(Atomicity atomicity); // Implementations diff --git a/src/compiler/sema_builtins.c b/src/compiler/sema_builtins.c index 10eaf9dbc..3606acd5f 100644 --- a/src/compiler/sema_builtins.c +++ b/src/compiler/sema_builtins.c @@ -636,6 +636,36 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) if (!sema_check_builtin_args_match(args, arg_count)) return false; rtype = args[0]->type; break; + case BUILTIN_ATOMIC_LOAD: + { + if 
(!sema_check_builtin_args(args, (BuiltinArg[]){ BA_POINTER, BA_BOOL, BA_INTEGER }, 3)) return false;
+			Type *original = type_flatten(args[0]->type);
+			if (original == type_voidptr)
+			{
+				SEMA_ERROR(args[0], "Expected a typed pointer.");
+				return false;
+			}
+			if (!expr_is_const(args[1]))
+			{
+				SEMA_ERROR(args[1], "'is_volatile' must be a compile time constant.");
+				return false;
+			}
+			if (!expr_is_const(args[2]))
+			{
+				SEMA_ERROR(args[2], "Ordering must be a compile time constant.");
+				return false;
+			}
+			if (!is_valid_atomicity(args[2])) return false;
+			switch (args[2]->const_expr.ixx.i.low)
+			{
+				case ATOMIC_ACQUIRE_RELEASE:
+				case ATOMIC_RELEASE:
+					SEMA_ERROR(args[2], "'release' and 'acquire release' are not valid for atomic loads.");
+					return false;
+			}
+			rtype = original->pointer;
+			break;
+		}
 		case BUILTIN_VOLATILE_LOAD:
 		{
 			if (!sema_check_builtin_args(args, (BuiltinArg[]) { BA_POINTER }, 1)) return false;
@@ -659,6 +689,36 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr)
 			rtype = args[1]->type;
 			break;
 		}
+		case BUILTIN_ATOMIC_STORE:
+		{
+			if (!sema_check_builtin_args(args, (BuiltinArg[]) { BA_POINTER }, 1)) return false;
+			if (!sema_check_builtin_args(&args[2], (BuiltinArg[]) { BA_BOOL, BA_INTEGER }, 2)) return false;
+			Type *original = type_flatten(args[0]->type);
+			if (original != type_voidptr)
+			{
+				if (!cast_implicit(context, args[1], original->pointer)) return false;
+			}
+			if (!expr_is_const(args[2]))
+			{
+				SEMA_ERROR(args[2], "'is_volatile' must be a compile time constant.");
+				return false;
+			}
+			if (!expr_is_const(args[3]))
+			{
+				SEMA_ERROR(args[3], "Ordering must be a compile time constant.");
+				return false;
+			}
+			if (!is_valid_atomicity(args[3])) return false;
+			switch (args[3]->const_expr.ixx.i.low)
+			{
+				case ATOMIC_ACQUIRE_RELEASE:
+				case ATOMIC_ACQUIRE:
+					SEMA_ERROR(args[3], "'acquire' and 'acquire release' are not valid for atomic stores.");
+					return false;
+			}
+			rtype = args[1]->type;
+			break;
+		}
 		case BUILTIN_NONE:
 		case
BUILTIN_COMPARE_EXCHANGE: case BUILTIN_FRAMEADDRESS: @@ -764,7 +824,10 @@ static inline int builtin_expected_args(BuiltinFunction func) case BUILTIN_OVERFLOW_MUL: case BUILTIN_OVERFLOW_SUB: case BUILTIN_PREFETCH: + case BUILTIN_ATOMIC_LOAD: return 3; + case BUILTIN_ATOMIC_STORE: + return 4; case BUILTIN_MEMCOPY: case BUILTIN_MEMCOPY_INLINE: case BUILTIN_MEMMOVE: diff --git a/src/compiler/symtab.c b/src/compiler/symtab.c index 85cd820f8..aae505d05 100644 --- a/src/compiler/symtab.c +++ b/src/compiler/symtab.c @@ -187,6 +187,8 @@ void symtab_init(uint32_t capacity) type_property_list[TYPE_PROPERTY_VALUES] = KW_DEF("values"); builtin_list[BUILTIN_ABS] = KW_DEF("abs"); + builtin_list[BUILTIN_ATOMIC_LOAD] = KW_DEF("atomic_load"); + builtin_list[BUILTIN_ATOMIC_STORE] = KW_DEF("atomic_store"); builtin_list[BUILTIN_BITREVERSE] = KW_DEF("bitreverse"); builtin_list[BUILTIN_BSWAP] = KW_DEF("bswap"); builtin_list[BUILTIN_CEIL] = KW_DEF("ceil"); diff --git a/src/version.h b/src/version.h index 2b39e6587..606ae798d 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define COMPILER_VERSION "0.4.69" \ No newline at end of file +#define COMPILER_VERSION "0.4.70" \ No newline at end of file diff --git a/test/test_suite/concurrency/atomic_load_store.c3t b/test/test_suite/concurrency/atomic_load_store.c3t new file mode 100644 index 000000000..0d4015b50 --- /dev/null +++ b/test/test_suite/concurrency/atomic_load_store.c3t @@ -0,0 +1,26 @@ +// #target: macos-x64 +module test; +import std::io; + +struct Ghh +{ + int a; + int b; + int c; +} +fn void main() +{ + int a = 111; + int x = @atomic_load(a); + int y = @atomic_load(a, MONOTONIC, true); + @atomic_store(a, 123 + x); + @atomic_store(a, 33 + y, MONOTONIC, true); + io::printfn("%d", a); +} + +/* #expect: test.ll + + %0 = load atomic i32, ptr %a seq_cst, align 4 + %1 = load atomic volatile i32, ptr %a monotonic, align 4 + store atomic i32 %3, ptr %a seq_cst, align 4 + store atomic volatile i32 %5, ptr %a monotonic, align 4 
diff --git a/test/test_suite/stdlib/map.c3t b/test/test_suite/stdlib/map.c3t
index 8297c69f9..02b7b7cdc 100644
--- a/test/test_suite/stdlib/map.c3t
+++ b/test/test_suite/stdlib/map.c3t
@@ -288,7 +288,7 @@ after_check51:                                    ; preds = %if.then
 panic_block:                                      ; preds = %assign_optional
   %85 = load ptr, ptr @std_core_builtin_panic, align 8
-  call void %85(ptr @.panic_msg, i64 27, ptr @.file, i64 6, ptr @.func, i64 4, i32 280)
+  call void %85(ptr @.panic_msg, i64 27, ptr @.file, i64 6, ptr @.func, i64 4, i32 310)
   unreachable

noerr_block:                                      ; preds = %after_check51