Add mem::store and mem::load which may combine both aligned and volatile operations.

This commit is contained in:
Christoffer Lerno
2026-01-17 18:35:44 +01:00
parent 2a8fbb8fec
commit 945a3f3fc0
6 changed files with 91 additions and 14 deletions

View File

@@ -200,7 +200,7 @@ macro @scatter_aligned(ptrvec, value, bool[<*>] mask, usz $alignment)
*>
macro @unaligned_load(#x, usz $alignment) @builtin
{
return $$unaligned_load(&#x, $alignment);
return $$unaligned_load(&#x, $alignment, false);
}
<*
@@ -215,9 +215,10 @@ macro @unaligned_load(#x, usz $alignment) @builtin
*>
macro @unaligned_store(#x, value, usz $alignment) @builtin
{
return $$unaligned_store(&#x, ($typeof(#x))value, $alignment);
return $$unaligned_store(&#x, ($typeof(#x))value, $alignment, false);
}
<*
@param #x : "The variable or dereferenced pointer to load."
@return "The value of the variable"
@@ -242,6 +243,36 @@ macro @volatile_store(#x, value) @builtin
return $$volatile_store(&#x, ($typeof(#x))value);
}
<*
Load a value through a typed pointer, assuming the given alignment for the
access; optionally performs the load as a volatile load. Combines the roles
of @unaligned_load and @volatile_load in a single macro.

@param ptr : "The pointer to load from"
@param $align : "The alignment to assume for the load"
@param $volatile : "Whether the load is volatile or not, defaults to false"
@return "The value of the variable"
@require $defined(*ptr) : "This must be a typed pointer"
@require @constant_is_power_of_2($align) : "The alignment must be a power of two"
*>
macro load(ptr, usz $align, bool $volatile = false)
{
return $$unaligned_load(ptr, $align, $volatile);
}
<*
Store a value through a typed pointer, assuming the given alignment for the
access; optionally performs the store as a volatile store. Combines the roles
of @unaligned_store and @volatile_store in a single macro. The value is
implicitly cast to the pointee type before the store.

@param ptr : "The pointer to store to."
@param value : "The value to store."
@param $align : "The alignment to assume for the store"
@param $volatile : "Whether the store is volatile, defaults to false"
@return "The value stored"
@require $defined(*ptr) : "This must be a typed pointer"
@require $defined(*ptr = value) : "The value doesn't match the variable"
@require @constant_is_power_of_2($align) : "The alignment must be a power of two"
*>
macro store(ptr, value, usz $align, bool $volatile = false)
{
return $$unaligned_store(ptr, ($typeof(*ptr))value, $align, $volatile);
}
<*
All possible atomic orderings
*>
@@ -1056,10 +1087,10 @@ typedef UnalignedRef = Type*;
macro Type UnalignedRef.get(self)
{
return @unaligned_load(*(Type*)self, ALIGNMENT);
return @unaligned_load(*(Type*)self, ALIGNMENT, false);
}
macro Type UnalignedRef.set(&self, Type val)
{
return @unaligned_store(*(Type*)self, val, ALIGNMENT);
return @unaligned_store(*(Type*)self, val, ALIGNMENT, false);
}

View File

@@ -16,6 +16,7 @@
- On win32 utf-8 console output is now enabled by default in compiled programs
- Add `$$VERSION` and `$$PRERELEASE` compile time constants.
- Require () around assignment in conditionals. #2716
- $$unaligned_load and $$unaligned_store now also take an `is_volatile` parameter.
### Fixes
- Regression with npot vector in struct triggering an assert #2219.
@@ -90,6 +91,7 @@
- Add extra `AsciiCharset` constants and combine its related compile-time/runtime macros. #2688
- Use a `Printable` struct for ansi RGB formatting instead of explicit allocation and deprecate the old method.
- HashSet.len() now returns usz instead of int. #2740
- Add `mem::store` and `mem::load` which may combine both aligned and volatile operations.
## 0.7.8 Change list

View File

@@ -171,7 +171,18 @@ INLINE void llvm_emit_unaligned_store(GenContext *c, BEValue *result_value, Expr
llvm_emit_expr(c, &value, expr->call_expr.arguments[0]);
llvm_value_rvalue(c, &value);
llvm_emit_expr(c, result_value, expr->call_expr.arguments[1]);
llvm_store_to_ptr_aligned(c, value.value, result_value, expr->call_expr.arguments[2]->const_expr.ixx.i.low);
LLVMValueRef store = llvm_store_to_ptr_aligned(c, value.value, result_value, expr->call_expr.arguments[2]->const_expr.ixx.i.low);
if (store && expr->call_expr.arguments[3]->const_expr.b)
{
if (LLVMIsAMemCpyInst(store))
{
LLVMSetOperand(store, 3, LLVMConstAllOnes(c->bool_type));
}
else
{
LLVMSetVolatile(store, true);
}
}
c->emitting_load_store_check = emit_check;
}
@@ -253,9 +264,11 @@ INLINE void llvm_emit_unaligned_load(GenContext *c, BEValue *result_value, Expr
bool emit_check = c->emitting_load_store_check;
c->emitting_load_store_check = true;
llvm_emit_expr(c, result_value, expr->call_expr.arguments[0]);
bool is_volatile = expr->call_expr.arguments[2]->const_expr.b;
llvm_value_deref(c, result_value);
result_value->alignment = expr->call_expr.arguments[1]->const_expr.ixx.i.low;
llvm_value_rvalue(c, result_value);
if (is_volatile && result_value->value) LLVMSetVolatile(result_value->value, is_volatile);
c->emitting_load_store_check = emit_check;
}

View File

@@ -1204,21 +1204,23 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr)
}
case BUILTIN_UNALIGNED_LOAD:
{
ASSERT(arg_count == 2);
if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER}, 2)) return false;
ASSERT(arg_count == 3);
if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER, BA_BOOL}, 3)) return false;
Type *original = type_flatten(args[0]->type);
if (original == type_voidptr) RETURN_SEMA_ERROR(args[0], "Expected a typed pointer.");
if (!sema_check_alignment_expression(context, args[1])) return false;
if (!sema_cast_const(args[2])) RETURN_SEMA_ERROR(args[2], "'is_volatile' must be a compile time constant.");
rtype = original->pointer;
break;
}
case BUILTIN_UNALIGNED_STORE:
{
ASSERT(arg_count == 3);
ASSERT(arg_count == 4);
if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return false;
if (!sema_check_builtin_args(context, &args[2], (BuiltinArg[]) {BA_INTEGER}, 1)) return false;
if (!sema_check_builtin_args(context, &args[2], (BuiltinArg[]) {BA_INTEGER, BA_BOOL}, 2)) return false;
Type *original = type_flatten(args[0]->type);
if (!sema_check_alignment_expression(context, args[2])) return false;
if (!sema_cast_const(args[3])) RETURN_SEMA_ERROR(args[3], "'is_volatile' must be a compile time constant.");
if (original != type_voidptr)
{
if (!cast_implicit(context, args[1], original->pointer, false)) return false;
@@ -1462,7 +1464,6 @@ static inline int builtin_expected_args(BuiltinFunction func)
case BUILTIN_SAT_SHL:
case BUILTIN_SAT_SUB:
case BUILTIN_STR_FIND:
case BUILTIN_UNALIGNED_LOAD:
case BUILTIN_VECCOMPEQ:
case BUILTIN_VECCOMPGE:
case BUILTIN_VECCOMPGT:
@@ -1481,7 +1482,7 @@ static inline int builtin_expected_args(BuiltinFunction func)
case BUILTIN_OVERFLOW_SUB:
case BUILTIN_PREFETCH:
case BUILTIN_ATOMIC_LOAD:
case BUILTIN_UNALIGNED_STORE:
case BUILTIN_UNALIGNED_LOAD:
case BUILTIN_SELECT:
case BUILTIN_MATRIX_TRANSPOSE:
return 3;
@@ -1491,6 +1492,7 @@ static inline int builtin_expected_args(BuiltinFunction func)
case BUILTIN_GATHER:
case BUILTIN_SCATTER:
case BUILTIN_STR_REPLACE:
case BUILTIN_UNALIGNED_STORE:
return 4;
case BUILTIN_ATOMIC_FETCH_EXCHANGE:
case BUILTIN_ATOMIC_FETCH_ADD:

View File

@@ -6,6 +6,16 @@ struct Foo
float[4] a;
}
fn void test()
{
// Exercises mem::store/mem::load with an explicit alignment plus the
// $volatile flag, on both a scalar (emitted as load/store instructions)
// and an array (emitted as memcpy) — see the #expect IR block below.
int b = 123;
int[100] g;
mem::store(&b, 342, $align: 2, $volatile: true);
int z = mem::load(&b, $align: 2, $volatile: true);
int[100] gg = mem::load(&g, $align: 4, $volatile: true);
mem::store(&g, gg, $align: 4, $volatile: true);
}
fn void main()
{
Foo* foo;
@@ -15,8 +25,8 @@ fn void main()
a = *(float[<4>]*)&foo.a;
*(float[<4>]*)&foo.a = a;
a = $$unaligned_load((float[<4>]*)&foo.a, 1);
$$unaligned_store((float[<4>]*)&foo.a, a, 1);
a = $$unaligned_load((float[<4>]*)&foo.a, 1, false);
$$unaligned_store((float[<4>]*)&foo.a, a, 1, false);
b = @unaligned_load(*(float[<4>]*)&foo.a, 1);
@unaligned_store(*(float[<4>]*)&foo.a, b, 1);
@@ -24,6 +34,25 @@ fn void main()
/* #expect: test.ll
define void @test.test() #0 {
entry:
%b = alloca i32, align 4
%g = alloca [100 x i32], align 16
%z = alloca i32, align 4
%gg = alloca [100 x i32], align 16
%value = alloca [100 x i32], align 16
store i32 123, ptr %b, align 4
call void @llvm.memset.p0.i64(ptr align 16 %g, i8 0, i64 400, i1 false)
store volatile i32 342, ptr %b, align 4
%0 = load volatile i32, ptr %b, align 2
store i32 %0, ptr %z, align 4
%1 = load volatile [100 x i32], ptr %g, align 4
store [100 x i32] %1, ptr %gg, align 16
call void @llvm.memcpy.p0.p0.i32(ptr align 16 %value, ptr align 16 %gg, i32 400, i1 false)
call void @llvm.memcpy.p0.p0.i32(ptr align 4 %g, ptr align 16 %value, i32 400, i1 true)
ret void
}
define void @test.main() #0 {
entry:
%foo = alloca ptr, align 8

View File

@@ -5,7 +5,7 @@ import std, libc;
fn void unaligned_load_store(void* dst, void* src) @nostrip
{
$$unaligned_store(dst, $$unaligned_load((char*)src, 2), 2);
$$unaligned_store(dst, $$unaligned_load((char*)src, 2), 2, false);
}
fn int main()