diff --git a/lib/std/math/math.c3 b/lib/std/math/math.c3 index 82793a247..99b393ae3 100644 --- a/lib/std/math/math.c3 +++ b/lib/std/math/math.c3 @@ -1162,7 +1162,7 @@ macro bool overflow_sub(a, b, out) => $$overflow_sub(a, b, out); @require values::@is_flat_intlike(a) &&& values::@is_flat_intlike(b) : "a and b must both be integer or integer vector based" @require $defined(*out) &&& @typematch(*out, a) : "out must be a pointer of the same type as a and b" *> -macro bool overflow_mul(a, b, out) => $$overflow_mul(a, b, out); +macro overflow_mul(a, b, out) => $$overflow_mul(a, b, out); <* @require types::is_vector($Type) || ($Type.kindof == ARRAY &&& types::is_numerical($typefrom($Type.inner))) diff --git a/releasenotes.md b/releasenotes.md index 166afab94..e500b9f1c 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -10,6 +10,7 @@ - Function referencing in `@return?` for simplified fault declarations. Check `@return?` eagerly #2340. - Enums now work with `membersof` to return the associated values. #2571 - Deprecated `SomeEnum.associated` in favour of `SomeEnum.membersof` +- Refactored `@simd` implementation. ### Fixes - `Foo.is_eq` would return false if the type was a `typedef` and had an overload, but the underlying type was not comparable. @@ -24,6 +25,8 @@ - Fix appending to `c:\` or `\` #2569. - When encountering a foreach over a `ZString*` it would not properly emit a compilation error, but hit an assert #2573. - Casting a distinct type based on a pointer to an `any` would accidentally be permitted. #2575 +- `overflow_*` vector ops now correctly return a bool vector. +- Regression vector ABI: npot vectors would load incorrectly from pointers and other things. 
#2576 ### Stdlib changes diff --git a/src/compiler/abi/c_abi.c b/src/compiler/abi/c_abi.c index 9caf03cc8..2ebf434db 100644 --- a/src/compiler/abi/c_abi.c +++ b/src/compiler/abi/c_abi.c @@ -252,7 +252,7 @@ void c_abi_func_create(Signature *sig, FunctionPrototype *proto, Expr **vaargs) Type *rtype = type_infoptr(sig->rtype)->type; Type *rtype_flat = type_flatten(rtype); unsigned param_count = 0; - if (rtype_flat->type_kind == TYPE_VECTOR && !type_is_simd(rtype)) + if (rtype_flat->type_kind == TYPE_VECTOR) { rtype_flat = type_array_from_vector(rtype_flat); proto->return_rewrite = PARAM_RW_VEC_TO_ARRAY; @@ -285,7 +285,7 @@ void c_abi_func_create(Signature *sig, FunctionPrototype *proto, Expr **vaargs) Decl *decl = sig->params[i]; Type *flat_type = type_flatten(decl->type); ParamInfo param_info = (ParamInfo) { .type = flat_type }; - if (flat_type->type_kind == TYPE_VECTOR && !type_is_simd(decl->type)) + if (flat_type->type_kind == TYPE_VECTOR) { param_info.rewrite = PARAM_RW_VEC_TO_ARRAY; param_info.type = type_array_from_vector(flat_type); diff --git a/src/compiler/abi/c_abi_aarch64.c b/src/compiler/abi/c_abi_aarch64.c index 36e9f0087..68726f908 100644 --- a/src/compiler/abi/c_abi_aarch64.c +++ b/src/compiler/abi/c_abi_aarch64.c @@ -6,7 +6,7 @@ INLINE bool is_aarch64_illegal_vector(Type *type) { - if (type->type_kind != TYPE_VECTOR) + if (type->type_kind != TYPE_SIMD_VECTOR) { // Return true if scaled vector return false; @@ -57,7 +57,7 @@ ABIArgInfo *aarch64_coerce_illegal_vector(Type *type, ParamInfo param) UNREACHABLE }*/ } - ASSERT(type->type_kind == TYPE_VECTOR); + ASSERT(type->type_kind == TYPE_SIMD_VECTOR); TypeSize size = type_size(type); // CLANG: Android promotes char[<2>] to ushort, not uint @@ -160,7 +160,7 @@ ABIArgInfo *aarch64_classify_return_type(ParamInfo param, bool variadic) TypeSize size = type_size(type); // Large vectors by mem. 
- if (type->type_kind == TYPE_VECTOR && size > 16) + if (type->type_kind == TYPE_SIMD_VECTOR && size > 16) { return abi_arg_new_direct_coerce_type(abi_type_get(type), param); } diff --git a/src/compiler/abi/c_abi_riscv.c b/src/compiler/abi/c_abi_riscv.c index 18e6ea038..58a5c3aa7 100644 --- a/src/compiler/abi/c_abi_riscv.c +++ b/src/compiler/abi/c_abi_riscv.c @@ -187,7 +187,7 @@ static ABIArgInfo *riscv_classify_argument_type(ParamInfo param, bool is_fixed, *gprs -= needed_gprs; - if (!type_is_abi_aggregate(type) && type->type_kind != TYPE_VECTOR) + if (!type_is_abi_aggregate(type) && type->type_kind != TYPE_SIMD_VECTOR) { // All integral types are promoted to XLen width, unless passed on the // stack. diff --git a/src/compiler/abi/c_abi_win64.c b/src/compiler/abi/c_abi_win64.c index a02bfc57a..000d6536c 100644 --- a/src/compiler/abi/c_abi_win64.c +++ b/src/compiler/abi/c_abi_win64.c @@ -24,20 +24,20 @@ ABIArgInfo *win64_classify(Regs *regs, ParamInfo param, bool is_return, bool is_ { // Enough registers AND return / builtin / vector if (regs->float_regs >= elements && - (is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR)) + (is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_SIMD_VECTOR)) { regs->float_regs -= elements; return abi_arg_new_direct(param); } // HVAs are handled later. - if (is_return || (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_VECTOR)) + if (is_return || (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_SIMD_VECTOR)) { return abi_arg_new_indirect_not_by_val(type, param); } // => to main handling. 
} ByteSize size = type_size(type); - bool type_is_vector_to_pass_as_array = compiler.build.feature.pass_win64_simd_as_arrays && type->type_kind == TYPE_VECTOR; + bool type_is_vector_to_pass_as_array = compiler.build.feature.pass_win64_simd_as_arrays && type->type_kind == TYPE_SIMD_VECTOR; if (type_is_vector_to_pass_as_array || type_is_abi_aggregate(type)) { // Not 1, 2, 4, 8? Pass indirect. @@ -77,7 +77,7 @@ ABIArgInfo *win64_reclassify_hva_arg(Regs *regs, ParamInfo param, ABIArgInfo *in Type *base = NULL; unsigned elements = 0; Type *type = type_lowering(param.type); - if (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_VECTOR && type_is_homogenous_aggregate(type, &base, &elements)) + if (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_SIMD_VECTOR && type_is_homogenous_aggregate(type, &base, &elements)) { if (regs->float_regs >= elements) { diff --git a/src/compiler/abi/c_abi_x64.c b/src/compiler/abi/c_abi_x64.c index 03f422848..99b7dc25f 100644 --- a/src/compiler/abi/c_abi_x64.c +++ b/src/compiler/abi/c_abi_x64.c @@ -57,7 +57,7 @@ ABIArgInfo *x64_indirect_return_result(Type *type, ParamInfo param) static bool x64_type_is_illegal_vector(Type *type) { // Only check vectors. - if (type->type_kind != TYPE_VECTOR) return false; + if (type->type_kind != TYPE_SIMD_VECTOR) return false; ByteSize size = type_size(type); // Less than 64 bits or larger than the avx native size => not allowed. 
if (size <= 8 || size > compiler.platform.x64.native_vector_size_avx) return true; @@ -373,6 +373,7 @@ static void x64_classify(Type *type, ByteSize offset_base, X64Class *lo_class, X { case LOWERED_TYPES: case TYPE_FUNC_RAW: + case TYPE_VECTOR: UNREACHABLE_VOID case TYPE_VOID: *current = CLASS_NO_CLASS; @@ -417,7 +418,7 @@ static void x64_classify(Type *type, ByteSize offset_base, X64Class *lo_class, X case TYPE_ARRAY: x64_classify_array(type, offset_base, current, lo_class, hi_class, named); break; - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: x64_classify_vector(type, offset_base, current, lo_class, hi_class, named); break; } @@ -559,6 +560,7 @@ AbiType x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_typ case LOWERED_TYPES: case TYPE_VOID: case TYPE_FUNC_RAW: + case TYPE_VECTOR: UNREACHABLE_VOID case TYPE_U64: case TYPE_I64: @@ -611,7 +613,7 @@ AbiType x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_typ case TYPE_U128: case ALL_FLOATS: case TYPE_UNION: - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: break; } ByteSize size = type_size(source_type); @@ -633,7 +635,7 @@ static AbiType x64_get_byte_vector_type(Type *type) type = type_lowering(type); // If vector - if (type->type_kind == TYPE_VECTOR) + if (type->type_kind == TYPE_SIMD_VECTOR) { Type *element = type->array.base->canonical; if (compiler.platform.x64.pass_int128_vector_in_mem && type_is_int128(element)) diff --git a/src/compiler/abi/c_abi_x86.c b/src/compiler/abi/c_abi_x86.c index 0869c73de..037029532 100644 --- a/src/compiler/abi/c_abi_x86.c +++ b/src/compiler/abi/c_abi_x86.c @@ -13,7 +13,7 @@ static ABIArgInfo **x86_create_params(CallABI abi, ParamInfo *params, unsigned p static inline bool type_is_simd_vector(Type *type) { type = type->canonical; - return type->type_kind == TYPE_VECTOR && type_size(type) == 16; + return type->type_kind == TYPE_SIMD_VECTOR && type_size(type) == 16; } static bool type_is_union_struct_with_simd_vector(Type *type) @@ -99,7 +99,7 @@ 
static bool x86_should_return_type_in_reg(Type *type) // Require power of two for everything except mcu. if (!compiler.platform.x86.is_mcu_api && !is_power_of_two(size)) return false; - if (type->type_kind == TYPE_VECTOR) + if (type->type_kind == TYPE_SIMD_VECTOR) { // 64 (and 128 bit) vectors are not returned as registers return size < 8; @@ -120,6 +120,7 @@ static bool x86_should_return_type_in_reg(Type *type) case TYPE_ALIAS: case TYPE_TYPEID: case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: case TYPE_VOID: UNREACHABLE case ALL_INTS: @@ -168,7 +169,7 @@ ABIArgInfo *x86_classify_return(CallABI call, Regs *regs, ParamInfo param) Type *base = NULL; unsigned elements = 0; - if (type->type_kind == TYPE_VECTOR) return abi_arg_new_direct(param); + if (type->type_kind == TYPE_SIMD_VECTOR) return abi_arg_new_direct(param); if (type_is_abi_aggregate(type)) { @@ -217,7 +218,7 @@ ABIArgInfo *x86_classify_return(CallABI call, Regs *regs, ParamInfo param) static inline bool x86_is_mmxtype(Type *type) { // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. - if (type->type_kind != TYPE_VECTOR) return false; + if (type->type_kind != TYPE_SIMD_VECTOR) return false; Type *element = lowered_array_element_type(type); if (type_size(element) >= 8) return false; if (!type_is_integer(element)) return false; @@ -344,7 +345,7 @@ UNUSED static inline ABIArgInfo *x86_classify_homogenous_aggregate(Regs *regs, T } // If it is a builtin, then expansion is not needed. 
- if (type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR) + if (type_is_builtin(type->type_kind) || type->type_kind == TYPE_SIMD_VECTOR) { return abi_arg_new_direct(param); } @@ -453,6 +454,7 @@ static ABIArgInfo *x86_classify_argument(CallABI call, Regs *regs, ParamInfo par case TYPE_VOID: case TYPE_FUNC_RAW: case TYPE_FLEXIBLE_ARRAY: + case TYPE_VECTOR: UNREACHABLE case ALL_FLOATS: case ALL_INTS: @@ -460,7 +462,7 @@ static ABIArgInfo *x86_classify_argument(CallABI call, Regs *regs, ParamInfo par case TYPE_FUNC_PTR: case TYPE_POINTER: return x86_classify_primitives(call, regs, type, param); - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: return x86_classify_vector(regs, type, param); case TYPE_STRUCT: case TYPE_UNION: diff --git a/src/compiler/c_codegen.c b/src/compiler/c_codegen.c index acbf1029d..0a90f2a9c 100644 --- a/src/compiler/c_codegen.c +++ b/src/compiler/c_codegen.c @@ -115,6 +115,7 @@ static const char *c_type_name(GenContext *c, Type *type) case TYPE_ARRAY: case TYPE_FLEXIBLE_ARRAY: case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: { void *prev = htable_get(&c->gen_decl, type); if (!prev) return "NOT_REGISTERED"; @@ -265,6 +266,7 @@ static bool c_emit_type_decl(GenContext *c, Type *type) htable_set(&c->gen_decl, type, scratch_buffer_copy()); return true; } + case TYPE_SIMD_VECTOR: case TYPE_VECTOR: error_exit("Vectors are not supported in the C backend yet."); } @@ -681,6 +683,7 @@ static void c_emit_local_decl(GenContext *c, Decl *decl, CValue *value) case TYPE_SLICE: case TYPE_ARRAY: case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: PRINT("/* TODO ZERO INIT */\n"); } diff --git a/src/compiler/codegen_general.c b/src/compiler/codegen_general.c index 99ff0485e..b6da03084 100644 --- a/src/compiler/codegen_general.c +++ b/src/compiler/codegen_general.c @@ -56,7 +56,7 @@ bool type_is_homogenous_base_type(Type *type) case TYPE_F32: case TYPE_F64: return !compiler.platform.ppc64.is_softfp; - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: return type_size(type) 
== 128 / 8; default: return false; @@ -69,7 +69,7 @@ bool type_is_homogenous_base_type(Type *type) case TYPE_F64: case TYPE_F32: return true; - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: switch (type_size(type)) { case 16: @@ -88,7 +88,7 @@ bool type_is_homogenous_base_type(Type *type) { case ALL_FLOATS: return true; - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: switch (type_size(type)) { case 8: @@ -108,7 +108,7 @@ bool type_is_homogenous_base_type(Type *type) case TYPE_F64: case TYPE_F128: return true; - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: switch (type_size(type)) { case 8: @@ -138,7 +138,7 @@ bool type_homogenous_aggregate_small_enough(Type *type, unsigned members) { case ABI_PPC64_SVR4: if (type->type_kind == TYPE_F128 && compiler.platform.float128) return members <= 8; - if (type->type_kind == TYPE_VECTOR) return members <= 8; + if (type->type_kind == TYPE_SIMD_VECTOR) return members <= 8; // Use max 8 registers. return ((type_size(type) + 7) / 8) * members <= 8; case ABI_X64: @@ -174,6 +174,9 @@ bool type_is_homogenous_aggregate(LoweredType *type, Type **base, unsigned *elem { case LOWERED_TYPES: UNREACHABLE + case TYPE_VECTOR: + // Converted in ABI + UNREACHABLE case TYPE_VOID: case TYPE_FUNC_RAW: case TYPE_SLICE: @@ -241,7 +244,7 @@ bool type_is_homogenous_aggregate(LoweredType *type, Type **base, unsigned *elem break; case ALL_UNSIGNED_INTS: case ALL_FLOATS: - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: break; case TYPE_POINTER: case TYPE_FUNC_PTR: @@ -258,15 +261,18 @@ bool type_is_homogenous_aggregate(LoweredType *type, Type **base, unsigned *elem { *base = type; // Special handling of non-power-of-2 vectors - if (type->type_kind == TYPE_VECTOR) + // If we allowed it + /* + if (type->type_kind == TYPE_SIMD_VECTOR) { // Widen the type with elements. 
unsigned vec_elements = type_size(type) / type_size(type->array.base); *base = type_get_vector(type->array.base, vec_elements); } + */ } // One is vector - other isn't => failure - if (((*base)->type_kind == TYPE_VECTOR) != (type->type_kind == TYPE_VECTOR)) return false; + if (((*base)->type_kind == TYPE_SIMD_VECTOR) != (type->type_kind == TYPE_SIMD_VECTOR)) return false; // Size does not match => failure if (type_size(*base) != type_size(type)) return false; diff --git a/src/compiler/codegen_internal.h b/src/compiler/codegen_internal.h index 48f7e0836..0f7f95585 100644 --- a/src/compiler/codegen_internal.h +++ b/src/compiler/codegen_internal.h @@ -65,6 +65,7 @@ static inline LoweredType *type_lowering(Type *type) case TYPE_SLICE: case TYPE_ARRAY: case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: case TYPE_FLEXIBLE_ARRAY: { Type *arr_type = type->array.base; @@ -76,8 +77,8 @@ static inline LoweredType *type_lowering(Type *type) return type_get_slice(flat); case TYPE_ARRAY: return type_get_array(flat, type->array.len); - case TYPE_VECTOR: - return type_get_vector(flat, type->array.len); + case VECTORS: + return type_get_vector_from_vector(flat, type); case TYPE_FLEXIBLE_ARRAY: return type_get_flexible_array(flat); default: @@ -103,11 +104,6 @@ static inline LoweredType *type_lowering_abi(Type *type) type = type->optional; continue; case TYPE_TYPEDEF: - if (type->decl->attr_simd) - { - type = type->decl->distinct->type; - return type_get_vector(type_lowering(type->array.base), type->array.len); - } type = type->decl->distinct->type; continue; case TYPE_CONST_ENUM: @@ -143,12 +139,15 @@ static inline LoweredType *type_lowering_abi(Type *type) case TYPE_ARRAY: case TYPE_VECTOR: case TYPE_FLEXIBLE_ARRAY: + case TYPE_SIMD_VECTOR: { Type *flat = type_lowering_abi(type->array.base); switch (type->type_kind) { case TYPE_SLICE: return type_get_slice(flat); + case TYPE_SIMD_VECTOR: + return type_get_vector(flat, TYPE_SIMD_VECTOR, type->array.len); case TYPE_ARRAY: case TYPE_VECTOR: 
return type_get_array(flat, type->array.len); @@ -177,29 +176,29 @@ static inline bool abi_type_match(AbiType type, Type *other_type) case ABI_TYPE_INT_56: return false; case ABI_TYPE_INT_VEC_2: - return other_type == type_get_vector(type_uint, 2); + return other_type == type_get_vector(type_uint, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_INT_VEC_4: - return other_type == type_get_vector(type_uint, 4); + return other_type == type_get_vector(type_uint, TYPE_SIMD_VECTOR, 4); case ABI_TYPE_FLOAT_VEC_2: - return other_type == type_get_vector(type_float, 2); + return other_type == type_get_vector(type_float, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_FLOAT_VEC_4: - return other_type == type_get_vector(type_float, 4); + return other_type == type_get_vector(type_float, TYPE_SIMD_VECTOR, 4); case ABI_TYPE_FLOAT16_VEC_2: - return other_type == type_get_vector(type_float16, 2); + return other_type == type_get_vector(type_float16, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_FLOAT16_VEC_4: - return other_type == type_get_vector(type_float16, 4); + return other_type == type_get_vector(type_float16, TYPE_SIMD_VECTOR, 4); case ABI_TYPE_BFLOAT16_VEC_2: - return other_type == type_get_vector(type_bfloat, 2); + return other_type == type_get_vector(type_bfloat, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_BFLOAT16_VEC_4: - return other_type == type_get_vector(type_bfloat, 4); + return other_type == type_get_vector(type_bfloat, TYPE_SIMD_VECTOR, 4); case ABI_TYPE_LONG_VEC_2: - return other_type == type_get_vector(type_ulong, 2); + return other_type == type_get_vector(type_ulong, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_DOUBLE_VEC_2: - return other_type == type_get_vector(type_double, 2); + return other_type == type_get_vector(type_double, TYPE_SIMD_VECTOR, 2); case ABI_TYPE_DOUBLE_VEC_4: - return other_type == type_get_vector(type_double, 4); + return other_type == type_get_vector(type_double, TYPE_SIMD_VECTOR, 4); case ABI_TYPE_DOUBLE_VEC_8: - return other_type == type_get_vector(type_double, 8); + return other_type 
== type_get_vector(type_double, TYPE_SIMD_VECTOR, 8); } UNREACHABLE } @@ -218,8 +217,8 @@ static inline bool abi_type_is_valid(AbiType type) static inline bool expr_is_vector_index_or_swizzle(Expr *expr) { - return (expr->expr_kind == EXPR_SUBSCRIPT && type_lowering(exprtype(expr->subscript_expr.expr))->type_kind == TYPE_VECTOR) - || (expr->expr_kind == EXPR_SWIZZLE && type_lowering(exprtype(expr->swizzle_expr.parent))->type_kind == TYPE_VECTOR); + return (expr->expr_kind == EXPR_SUBSCRIPT && type_kind_is_real_vector(type_lowering(exprtype(expr->subscript_expr.expr))->type_kind)) + || (expr->expr_kind == EXPR_SWIZZLE && type_kind_is_real_vector(type_lowering(exprtype(expr->swizzle_expr.parent))->type_kind)); } const char *codegen_create_asm(Ast *ast); diff --git a/src/compiler/compiler_internal.h b/src/compiler/compiler_internal.h index 13500ccc6..24c90b89c 100644 --- a/src/compiler/compiler_internal.h +++ b/src/compiler/compiler_internal.h @@ -143,7 +143,6 @@ typedef struct struct ConstInitializer_ { ConstInitType kind; - bool is_simd; // Type, should always be flattened Type *type; union @@ -359,6 +358,7 @@ struct TypeInfo_ TypeInfoKind kind : 6; bool optional : 1; bool in_def : 1; + bool is_simd : 1; TypeInfoCompressedKind subtype : 4; Type *type; SourceSpan span; @@ -695,7 +695,6 @@ typedef struct Decl_ bool resolved_attributes : 1; bool allow_deprecated : 1; bool attr_structlike : 1; - bool attr_simd : 1; union { void *backend_ref; @@ -902,7 +901,6 @@ typedef struct typedef struct { - bool raw_offset : 1; ExprId ptr; ExprId offset; } ExprPointerOffset; @@ -2609,8 +2607,8 @@ bool type_is_comparable(Type *type); bool type_is_ordered(Type *type); unsigned type_get_introspection_kind(TypeKind kind); void type_mangle_introspect_name_to_buffer(Type *type); +AlignSize type_alloca_alignment(Type *type); AlignSize type_abi_alignment(Type *type); -AlignSize type_simd_alignment(CanonicalType *type); bool type_func_match(Type *fn_type, Type *rtype, unsigned arg_count, 
...); Type *type_find_largest_union_element(Type *type); Type *type_find_max_type(Type *type, Type *other, Expr *first, Expr *second); @@ -2630,10 +2628,12 @@ Type *type_get_slice(Type *arr_type); Type *type_get_inferred_array(Type *arr_type); Type *type_get_inferred_vector(Type *arr_type); Type *type_get_flexible_array(Type *arr_type); -AlignSize type_alloca_alignment(Type *type); Type *type_get_optional(Type *optional_type); -Type *type_get_vector(Type *vector_type, unsigned len); -Type *type_get_vector_bool(Type *original_type); +Type *type_get_vector(Type *vector_type, TypeKind kind, unsigned len); +Type *type_get_vector_from_vector(Type *base_type, Type *orginal_vector); +Type *type_get_simd_from_vector(Type *orginal_vector); +Type *type_get_vector_bool(Type *original_type, TypeKind kind); + Type *type_int_signed_by_bitsize(BitSize bitsize); Type *type_int_unsigned_by_bitsize(BitSize bit_size); bool type_is_matching_int(CanonicalType *type1, CanonicalType *type2); @@ -2644,7 +2644,6 @@ void type_func_prototype_init(uint32_t capacity); Type *type_find_parent_type(Type *type); bool type_is_subtype(Type *type, Type *possible_subtype); bool type_is_abi_aggregate(Type *type); -bool type_is_simd(Type *type); bool type_is_aggregate(Type *type); bool type_is_int128(Type *type); @@ -2723,6 +2722,7 @@ int type_kind_bitsize(TypeKind kind); INLINE bool type_kind_is_signed(TypeKind kind); INLINE bool type_kind_is_unsigned(TypeKind kind); INLINE bool type_kind_is_any_integer(TypeKind kind); +INLINE bool type_kind_is_real_vector(TypeKind kind); void advance(ParseContext *c); INLINE void advance_and_verify(ParseContext *context, TokenType token_type); @@ -2769,12 +2769,14 @@ INLINE Type *type_from_inferred(Type *flattened, Type *element_type, unsigned co case TYPE_POINTER: ASSERT(count == 0); return type_get_ptr(element_type); + case TYPE_SIMD_VECTOR: + ASSERT(flattened->array.len == count); + return type_get_vector(element_type, TYPE_SIMD_VECTOR, count); case TYPE_VECTOR: 
ASSERT(flattened->array.len == count); FALLTHROUGH; case TYPE_INFERRED_VECTOR: - return type_get_vector(element_type, count); - break; + return type_get_vector(element_type, TYPE_VECTOR, count); case TYPE_ARRAY: ASSERT(flattened->array.len == count); FALLTHROUGH; @@ -2800,7 +2802,7 @@ INLINE bool type_len_is_inferred(Type *type) continue; case TYPE_ARRAY: case TYPE_SLICE: - case TYPE_VECTOR: + case VECTORS: type = type->array.base; continue; case TYPE_INFERRED_ARRAY: @@ -2907,7 +2909,7 @@ INLINE bool type_info_poison(TypeInfo *type) INLINE bool type_is_arraylike(Type *type) { DECL_TYPE_KIND_REAL(kind, type); - return kind == TYPE_ARRAY || kind == TYPE_VECTOR || kind == TYPE_FLEXIBLE_ARRAY; + return kind == TYPE_ARRAY || kind == TYPE_VECTOR || kind == TYPE_FLEXIBLE_ARRAY || kind == TYPE_SIMD_VECTOR; } INLINE bool type_is_any_arraylike(Type *type) @@ -2937,7 +2939,7 @@ static inline bool type_is_pointer_like(Type *type) { case TYPE_POINTER: return true; - case TYPE_VECTOR: + case VECTORS: return type_is_pointer_like(type->array.base->canonical); default: return false; @@ -2946,7 +2948,7 @@ static inline bool type_is_pointer_like(Type *type) INLINE bool type_is_pointer_vector(Type *type) { - return type->type_kind == TYPE_VECTOR && type->array.base->canonical->type_kind == TYPE_POINTER; + return type_kind_is_real_vector(type->type_kind) && type->array.base->canonical->type_kind == TYPE_POINTER; } INLINE bool type_is_atomic(Type *type_flat) @@ -2996,7 +2998,7 @@ INLINE bool type_may_negate(Type *type) RETRY: switch (type->type_kind) { - case TYPE_VECTOR: + case VECTORS: type = type->array.base; goto RETRY; case ALL_FLOATS: @@ -3033,7 +3035,7 @@ INLINE bool type_is_floatlike(Type *type) { type = type->canonical; TypeKind kind = type->type_kind; - if (kind == TYPE_VECTOR && type_is_float(type->array.base)) return true; + if (type_kind_is_real_vector(kind) && type_is_float(type->array.base)) return true; return kind >= TYPE_FLOAT_FIRST && kind <= TYPE_FLOAT_LAST; } @@ 
-3237,7 +3239,7 @@ static inline Type *type_flat_distinct_inline(Type *type) static inline CanonicalType *type_vector_base(CanonicalType *type) { - return type->type_kind == TYPE_VECTOR ? type->array.base->canonical : type; + return type_kind_is_real_vector(type->type_kind) ? type->array.base->canonical : type; } static inline Type *type_flatten_and_inline(Type *type) @@ -3358,7 +3360,7 @@ static inline Type *type_flatten_to_int(Type *type) break; case TYPE_ENUM: return type; - case TYPE_VECTOR: + case VECTORS: ASSERT(type_is_integer(type->array.base)); return type; case TYPE_ALIAS: @@ -3466,7 +3468,7 @@ static inline bool type_flat_is_valid_for_arg_h(Type *type) INLINE Type *type_vector_type(Type *type) { Type *flatten = type_flatten(type); - return flatten->type_kind == TYPE_VECTOR ? flatten->array.base : NULL; + return type_kind_is_real_vector(flatten->type_kind) ? flatten->array.base : NULL; } INLINE bool type_is_builtin(TypeKind kind) { return kind >= TYPE_VOID && kind <= TYPE_TYPEID; } @@ -3483,7 +3485,7 @@ INLINE bool type_is_signed(Type *type) { TypeKind kind = type->type_kind; if (kind >= TYPE_I8 && kind < TYPE_U8) return true; - if (kind != TYPE_VECTOR) return false; + if (!type_kind_is_real_vector(kind)) return false; kind = type->array.base->type_kind; return kind >= TYPE_I8 && kind < TYPE_U8; } @@ -3566,7 +3568,7 @@ INLINE bool type_is_numeric(Type *type) RETRY:; DECL_TYPE_KIND_REAL(kind, type); if ((kind >= TYPE_I8) & (kind <= TYPE_FLOAT_LAST)) return true; - if (type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(type->type_kind)) { type = type->array.base; goto RETRY; @@ -3584,26 +3586,43 @@ INLINE bool type_underlying_may_add_sub(CanonicalType *type) return type->type_kind == TYPE_ENUM || type->type_kind == TYPE_POINTER || type_is_numeric(type); } +INLINE bool type_is_vec(FlatType *type) +{ + ASSERT(type_flatten(type) == type); + TypeKind kind = type->type_kind; + return kind == TYPE_VECTOR || kind == TYPE_SIMD_VECTOR; +} + +INLINE bool 
type_kind_is_real_vector(TypeKind kind) +{ + return kind == TYPE_VECTOR || kind == TYPE_SIMD_VECTOR; +} + INLINE bool type_flat_is_vector(Type *type) { - return type_flatten(type)->type_kind == TYPE_VECTOR; + return type_kind_is_real_vector(type_flatten(type)->type_kind); } INLINE bool type_flat_is_vector_bitstruct(Type *type) { TypeKind kind = type_flatten(type)->type_kind; - return kind == TYPE_VECTOR || kind == TYPE_BITSTRUCT; + return kind == TYPE_VECTOR || kind == TYPE_BITSTRUCT || kind == TYPE_SIMD_VECTOR; +} + +INLINE bool type_kind_is_any_non_simd_vector(TypeKind kind) +{ + return kind == TYPE_VECTOR || kind == TYPE_INFERRED_VECTOR; } INLINE bool type_kind_is_any_vector(TypeKind kind) { - return kind == TYPE_VECTOR || kind == TYPE_INFERRED_VECTOR; + return kind == TYPE_VECTOR || kind == TYPE_INFERRED_VECTOR || kind == TYPE_SIMD_VECTOR; } INLINE bool type_flat_is_bool_vector(Type *type) { Type *flat = type_flatten(type); - return flat->type_kind == TYPE_VECTOR && type_flatten(flat->array.base) == type_bool; + return type_kind_is_real_vector(flat->type_kind) && type_flatten(flat->array.base) == type_bool; } INLINE bool type_is_union_or_strukt(Type *type) @@ -4554,7 +4573,6 @@ INLINE bool check_module_name(Path *path) INLINE void const_init_set_type(ConstInitializer *init, Type *type) { - init->is_simd = type_is_simd(type); init->type = type_flatten(type); } diff --git a/src/compiler/enums.h b/src/compiler/enums.h index 3ccebb879..add7aea74 100644 --- a/src/compiler/enums.h +++ b/src/compiler/enums.h @@ -1407,6 +1407,7 @@ typedef enum TYPE_FLEXIBLE_ARRAY, TYPE_INFERRED_ARRAY, TYPE_VECTOR, + TYPE_SIMD_VECTOR, TYPE_INFERRED_VECTOR, TYPE_LAST_ARRAYLIKE = TYPE_INFERRED_VECTOR, TYPE_OPTIONAL, @@ -1813,6 +1814,9 @@ static_assert(EXPR_LAST < 128, "Too many expression types"); #define ALL_SIGNED_INTS TYPE_I8: case TYPE_I16: case TYPE_I32: case TYPE_I64: case TYPE_I128 #define ALL_UNSIGNED_INTS TYPE_U8: case TYPE_U16: case TYPE_U32: case TYPE_U64: case TYPE_U128 
#define ALL_FLOATS TYPE_BF16: case TYPE_F16: case TYPE_F32: case TYPE_F64: case TYPE_F128 +#define ALL_VECTORS TYPE_VECTOR: case TYPE_INFERRED_VECTOR: case TYPE_SIMD_VECTOR +#define ALL_ARRAYLIKE TYPE_VECTOR: case TYPE_INFERRED_VECTOR: case TYPE_SIMD_VECTOR: case TYPE_ARRAY: case TYPE_INFERRED_ARRAY: case TYPE_FLEXIBLE_ARRAY +#define VECTORS TYPE_VECTOR: case TYPE_SIMD_VECTOR #define LOWERED_TYPES CT_TYPES: case TYPE_ENUM: case TYPE_ALIAS: case TYPE_TYPEID: \ case TYPE_TYPEDEF: case TYPE_ANYFAULT: case TYPE_BITSTRUCT: \ diff --git a/src/compiler/expr.c b/src/compiler/expr.c index 7ec04168f..a0d97bd66 100644 --- a/src/compiler/expr.c +++ b/src/compiler/expr.c @@ -710,8 +710,8 @@ void expr_rewrite_to_const_zero(Expr *expr, Type *type) case TYPE_STRUCT: case TYPE_UNION: case TYPE_BITSTRUCT: - case TYPE_VECTOR: case TYPE_ARRAY: + case VECTORS: expr_rewrite_const_initializer(expr, type, const_init_new_zero(type)); return; case TYPE_TYPEDEF: diff --git a/src/compiler/headers.c b/src/compiler/headers.c index 16aa743da..6b156d4f3 100644 --- a/src/compiler/headers.c +++ b/src/compiler/headers.c @@ -212,6 +212,8 @@ static void header_print_type(HeaderContext *c, Type *type) case TYPE_SLICE: PRINTF("c3slice_t"); return; + case TYPE_SIMD_VECTOR: + TODO case TYPE_VECTOR: switch (type_flatten(type->array.base)->type_kind) { @@ -568,6 +570,8 @@ RETRY: case TYPE_FLEXIBLE_ARRAY: type = type->array.base; goto RETRY; + case TYPE_SIMD_VECTOR: + TODO case TYPE_VECTOR: { if (!header_try_gen_both(c, type)) return; diff --git a/src/compiler/llvm_codegen.c b/src/compiler/llvm_codegen.c index 69ae73373..6e4c7e9d9 100644 --- a/src/compiler/llvm_codegen.c +++ b/src/compiler/llvm_codegen.c @@ -246,7 +246,7 @@ LLVMValueRef llvm_emit_const_initializer(GenContext *c, ConstInitializer *const_ Type *element_type = array_type->array.base; LLVMTypeRef element_type_llvm = llvm_get_type(c, element_type); ConstInitializer **elements = const_init->init_array_full; - ASSERT(array_type->type_kind == 
TYPE_ARRAY || array_type->type_kind == TYPE_VECTOR); + ASSERT(type_is_arraylike(array_type)); ArraySize size = array_type->array.len; ASSERT(size > 0); LLVMValueRef *parts = VECNEW(LLVMValueRef, size); @@ -256,7 +256,7 @@ LLVMValueRef llvm_emit_const_initializer(GenContext *c, ConstInitializer *const_ if (element_type_llvm != LLVMTypeOf(element)) was_modified = true; vec_add(parts, element); } - if ((!in_aggregate && array_type->type_kind == TYPE_VECTOR) || const_init->is_simd) + if ((!in_aggregate && array_type->type_kind == TYPE_VECTOR) || array_type->type_kind == TYPE_SIMD_VECTOR) { return LLVMConstVector(parts, vec_size(parts)); } @@ -282,7 +282,7 @@ LLVMValueRef llvm_emit_const_initializer(GenContext *c, ConstInitializer *const_ unsigned alignment = 0; LLVMValueRef *parts = NULL; bool pack = false; - bool is_vec = const_init->is_simd || (!in_aggregate && array_type->type_kind == TYPE_VECTOR); + bool is_vec = array_type->type_kind == TYPE_SIMD_VECTOR || (!in_aggregate && array_type->type_kind == TYPE_VECTOR); FOREACH(ConstInitializer *, element, elements) { ASSERT(element->kind == CONST_INIT_ARRAY_VALUE); @@ -444,7 +444,7 @@ void llvm_emit_ptr_from_array(GenContext *c, BEValue *value) value->kind = BE_ADDRESS; return; case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: case TYPE_FLEXIBLE_ARRAY: return; case TYPE_SLICE: @@ -736,8 +736,7 @@ void gencontext_print_llvm_ir(GenContext *context) } } - -LLVMValueRef llvm_emit_alloca(GenContext *c, LLVMTypeRef type, unsigned alignment, const char *name) +INLINE LLVMValueRef llvm_emit_alloca_internal(GenContext *c, LLVMTypeRef type, unsigned alignment, const char *name) { ASSERT(LLVMGetTypeKind(type) != LLVMVoidTypeKind); ASSERT(!llvm_is_global_eval(c)); @@ -751,9 +750,31 @@ LLVMValueRef llvm_emit_alloca(GenContext *c, LLVMTypeRef type, unsigned alignmen return alloca; } -LLVMValueRef llvm_emit_alloca_aligned(GenContext *c, Type *type, const char *name) +BEValue llvm_emit_alloca_b(GenContext *c, Type *type, const char 
*name) { - return llvm_emit_alloca(c, llvm_get_type(c, type), type_alloca_alignment(type), name); + type = type_lowering(type); + if (type->type_kind == TYPE_VECTOR) + { + type = type_get_vector(type->array.base, TYPE_SIMD_VECTOR, type->array.len); + } + LLVMTypeRef llvm_type = llvm_get_type(c, type); + AlignSize alignment = type_alloca_alignment(type); + LLVMValueRef alloca = llvm_emit_alloca_internal(c, llvm_type, alignment, name); + return (BEValue){.value = alloca, .alignment = alignment, .kind = BE_ADDRESS, .type = type}; +} + +BEValue llvm_emit_alloca_b_realign(GenContext *c, Type *type, AlignSize alignment, const char *name) +{ + ASSERT(alignment != 0); + type = type_lowering(type); + LLVMTypeRef llvm_type = llvm_get_type(c, type); + LLVMValueRef alloca = llvm_emit_alloca_internal(c, llvm_type, alignment, name); + return (BEValue){.value = alloca, .alignment = alignment, .kind = BE_ADDRESS, .type = type}; +} + +LLVMValueRef llvm_emit_alloca(GenContext *c, LLVMTypeRef type, unsigned alignment, const char *name) +{ + return llvm_emit_alloca_internal(c, type, alignment, name); } void llvm_emit_and_set_decl_alloca(GenContext *c, Decl *decl) @@ -761,7 +782,7 @@ void llvm_emit_and_set_decl_alloca(GenContext *c, Decl *decl) Type *type = type_lowering(decl->type); if (type == type_void) return; ASSERT(!decl->backend_ref && !decl->is_value); - decl->backend_ref = llvm_emit_alloca(c, llvm_get_type(c, type), decl->alignment, decl->name ? decl->name : ".anon"); + decl->backend_ref = llvm_emit_alloca_internal(c, llvm_get_type(c, type), decl->alignment, decl->name ? 
decl->name : ".anon"); } void llvm_emit_local_var_alloca(GenContext *c, Decl *decl) diff --git a/src/compiler/llvm_codegen_builtins.c b/src/compiler/llvm_codegen_builtins.c index e49eb52e4..460cf3dcb 100644 --- a/src/compiler/llvm_codegen_builtins.c +++ b/src/compiler/llvm_codegen_builtins.c @@ -360,7 +360,7 @@ INLINE unsigned llvm_intrinsic_by_type(Type *type, unsigned int_intrinsic, unsig return uint_intrinsic; case ALL_FLOATS: return float_intrinsic; - case TYPE_VECTOR: + case VECTORS: type = type->array.base; goto RETRY; default: @@ -679,7 +679,7 @@ static void llvm_emit_wrap_builtin(GenContext *c, BEValue *result_value, Expr *e LLVMValueRef arg_slots[2]; llvm_emit_intrinsic_args(c, args, arg_slots, func == BUILTIN_EXACT_NEG ? 1 : 2); Type *base_type = type_lowering(args[0]->type); - if (base_type->type_kind == TYPE_VECTOR) base_type = base_type->array.base; + if (type_kind_is_real_vector(base_type->type_kind)) base_type = base_type->array.base; ASSERT(type_is_integer(base_type)); LLVMValueRef res; switch (func) diff --git a/src/compiler/llvm_codegen_debug_info.c b/src/compiler/llvm_codegen_debug_info.c index a3dd22e7f..7e035b250 100644 --- a/src/compiler/llvm_codegen_debug_info.c +++ b/src/compiler/llvm_codegen_debug_info.c @@ -561,7 +561,7 @@ static LLVMMetadataRef llvm_debug_vector_type(GenContext *c, Type *type) { LLVMMetadataRef *ranges = NULL; Type *current_type = type; - while (current_type->canonical->type_kind == TYPE_VECTOR) + while (type_kind_is_any_vector(current_type->canonical->type_kind)) { vec_add(ranges, LLVMDIBuilderGetOrCreateSubrange(c->debug.builder, 0, current_type->canonical->array.len)); current_type = current_type->canonical->array.base; @@ -641,7 +641,7 @@ static inline LLVMMetadataRef llvm_get_debug_type_internal(GenContext *c, Type * case TYPE_F64: case TYPE_F128: return llvm_debug_simple_type(c, type, DW_ATE_float); - case TYPE_VECTOR: + case VECTORS: return type->backend_debug_type = llvm_debug_vector_type(c, type); case TYPE_VOID: 
return NULL; diff --git a/src/compiler/llvm_codegen_expr.c b/src/compiler/llvm_codegen_expr.c index c999c75e0..fe6721704 100644 --- a/src/compiler/llvm_codegen_expr.c +++ b/src/compiler/llvm_codegen_expr.c @@ -46,7 +46,7 @@ static void llvm_emit_macro_body_expansion(GenContext *c, BEValue *value, Expr * static void llvm_emit_post_unary_expr(GenContext *context, BEValue *be_value, Expr *expr); static void llvm_emit_unary_expr(GenContext *c, BEValue *value, Expr *expr); static inline void llvm_emit_memcmp(GenContext *c, BEValue *be_value, LLVMValueRef ptr, LLVMValueRef other_ptr, LLVMValueRef size); -static LLVMTypeRef llvm_find_inner_struct_type_for_coerce(GenContext *c, LLVMTypeRef struct_type, ByteSize dest_size); +static LLVMTypeRef llvm_find_inner_struct_type_for_coerce(GenContext *c, LLVMTypeRef type, ByteSize dest_size); static void llvm_expand_type_to_args(GenContext *context, Type *param_type, LLVMValueRef expand_ptr, LLVMValueRef *args, unsigned *arg_count_ref, AlignSize alignment); static inline void llvm_emit_initialize_reference_designated_bitstruct(GenContext *c, BEValue *ref, Decl *bitstruct, Expr **elements, Expr *splat); INLINE LLVMValueRef llvm_emit_bitstruct_value_update(GenContext *c, LLVMValueRef current_val, TypeSize bits, LLVMTypeRef bitstruct_type, Decl *member, LLVMValueRef val); @@ -188,11 +188,8 @@ BEValue llvm_emit_assign_expr(GenContext *c, BEValue *ref, Expr *ref_expr, Expr } else { - BEValue val; Type *type = ref_expr ? 
type_lowering(ref_expr->type) : ref->type; - AlignSize alignment = type_alloca_alignment(type); - LLVMValueRef temp = llvm_emit_alloca(c, llvm_get_type(c, type), alignment, ".assign_list"); - llvm_value_set_address(c, &val, temp, type, alignment); + BEValue val = llvm_emit_alloca_b(c, type, ".assign_list"); llvm_emit_initialize_reference(c, &val, expr); if (ref_expr) llvm_emit_expr(c, ref, ref_expr); llvm_store(c, ref, &val); @@ -258,10 +255,10 @@ static LLVMValueRef llvm_emit_coerce_alignment(GenContext *c, BEValue *be_value, if (!llvm_value_is_addr(be_value) || be_value->alignment < target_alignment) { // COERCE UPDATE bitcast removed, check for ways to optimize - LLVMValueRef target = llvm_emit_alloca(c, llvm_get_type(c, be_value->type), target_alignment, "coerce"); - llvm_store_to_ptr_aligned(c, target, be_value, target_alignment); + BEValue target = llvm_emit_alloca_b_realign(c, be_value->type, target_alignment, "coerce"); + llvm_store(c, &target, be_value); *resulting_alignment = target_alignment; - return target; + return target.value; } *resulting_alignment = be_value->alignment; return be_value->value; @@ -367,7 +364,7 @@ LLVMValueRef llvm_emit_const_padding(GenContext *c, AlignSize size) static inline LLVMValueRef llvm_emit_add_int(GenContext *c, Type *type, LLVMValueRef left, LLVMValueRef right, SourceSpan loc) { - if (compiler.build.feature.trap_on_wrap && type->type_kind != TYPE_VECTOR) + if (compiler.build.feature.trap_on_wrap && !type_kind_is_real_vector(type->type_kind)) { LLVMTypeRef type_to_use = llvm_get_type(c, type->canonical); LLVMValueRef args[2] = { left, right }; @@ -596,14 +593,14 @@ void llvm_emit_coerce_store(GenContext *c, LLVMValueRef addr, AlignSize alignmen void llvm_emit_convert_value_from_coerced(GenContext *c, BEValue *result, LLVMTypeRef coerced, LLVMValueRef value, Type *original_type) { LLVMTypeRef target_type = llvm_get_type(c, original_type); - LLVMValueRef addr = llvm_emit_alloca(c, target_type, 
type_abi_alignment(original_type), "result"); - llvm_emit_coerce_store(c, addr, type_abi_alignment(original_type), coerced, value, target_type); + LLVMValueRef addr = llvm_emit_alloca(c, target_type, type_alloca_alignment(original_type), "result"); + llvm_emit_coerce_store(c, addr, type_alloca_alignment(original_type), coerced, value, target_type); llvm_value_set_address_abi_aligned(c, result, addr, original_type); } static inline LLVMValueRef llvm_emit_sub_int(GenContext *c, Type *type, LLVMValueRef left, LLVMValueRef right, SourceSpan loc) { - if (compiler.build.feature.trap_on_wrap && type->type_kind != TYPE_VECTOR) + if (compiler.build.feature.trap_on_wrap && !type_kind_is_real_vector(type->type_kind)) { LLVMTypeRef type_to_use = llvm_get_type(c, type); LLVMValueRef args[2] = { left, right }; @@ -655,24 +652,21 @@ static inline void llvm_emit_subscript_addr_with_base(GenContext *c, BEValue *re switch (type->type_kind) { case TYPE_POINTER: + llvm_value_set_address_abi_aligned( c, result, - llvm_emit_pointer_inbounds_gep_raw(c, llvm_get_pointee_type(c, parent->type), parent->value, index->value), + llvm_emit_pointer_inbounds_gep_raw(c, parent->value, index->value, type_size(type->pointer)), type->pointer); return; case TYPE_ARRAY: case TYPE_FLEXIBLE_ARRAY: - case TYPE_VECTOR: - { - AlignSize alignment; - LLVMValueRef ptr = llvm_emit_array_gep_raw_index(c, parent->value, llvm_get_type(c, type), index, parent->alignment, &alignment); - llvm_value_set_address(c, result, ptr, type->array.base, alignment); + case VECTORS: + *result = llvm_emit_array_gep_index(c, parent, index); return; - } case TYPE_SLICE: { - LLVMValueRef ptr = llvm_emit_pointer_inbounds_gep_raw(c, llvm_get_type(c, type->array.base), parent->value, index->value); + LLVMValueRef ptr = llvm_emit_pointer_inbounds_gep_raw(c, parent->value, index->value, type_size(type->array.base)); llvm_value_set_address(c, result, ptr, type->array.base, type_abi_alignment(type->array.base)); } return; @@ -687,7 +681,7 
@@ static inline void llvm_emit_vector_subscript(GenContext *c, BEValue *value, Exp llvm_emit_exprid(c, value, expr->subscript_expr.expr); llvm_value_rvalue(c, value); Type *vec = value->type; - ASSERT(vec->type_kind == TYPE_VECTOR); + ASSERT(type_kind_is_real_vector(vec->type_kind)); Type *element = vec->array.base; LLVMValueRef vector = value->value; llvm_emit_exprid(c, value, expr->subscript_expr.index.expr); @@ -738,7 +732,7 @@ static inline void llvm_emit_subscript_addr(GenContext *c, BEValue *value, Expr } } } - else if (parent_type_kind == TYPE_ARRAY || parent_type_kind == TYPE_VECTOR) + else if (parent_type_kind == TYPE_ARRAY || type_kind_is_real_vector(parent_type_kind)) { // From back should always be folded. ASSERT(!expr_is_const(expr) || !start_from_end); @@ -779,7 +773,7 @@ static inline void llvm_emit_subscript(GenContext *c, BEValue *value, Expr *expr { Expr *parent_expr = exprptr(expr->subscript_expr.expr); Type *parent_type = type_lowering(parent_expr->type); - if (parent_type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(parent_type->type_kind)) { llvm_emit_vector_subscript(c, value, expr); return; @@ -818,17 +812,8 @@ static inline void llvm_emit_pointer_offset(GenContext *c, BEValue *value, Expr llvm_emit_expr(c, &offset, offset_expr); llvm_value_rvalue(c, &offset); - LLVMTypeRef element_type; - ArraySize vec_len = pointer->type->type_kind == TYPE_VECTOR ? pointer->type->array.len : 0; - if (expr->pointer_offset_expr.raw_offset) - { - element_type = vec_len ? LLVMVectorType(c->byte_type, vec_len) : c->byte_type; - } - else - { - element_type = llvm_get_pointee_type(c, vec_len ? pointer->type->array.base : pointer->type); - } - value->value = llvm_emit_pointer_gep_raw(c, element_type, value->value, offset.value); + Type *element = type_is_vec(value->type) ? 
value->type->array.base->pointer : value->type->pointer; + value->value = llvm_emit_pointer_gep_raw(c, value->value, offset.value, type_size(element)); } @@ -932,7 +917,7 @@ static inline void llvm_extract_bool_bit_from_array(GenContext *c, BEValue *be_v LLVMTypeRef array_type = llvm_get_type(c, type_char); unsigned start_bit = member->var.start_bit; // Grab the byte - LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, llvm_get_type(c, be_value->type), + LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, type_char, start_bit / 8, be_value->alignment, &alignment); LLVMValueRef element = llvm_load(c, array_type, byte_ptr, alignment, ""); // Shift the bit to the zero position. @@ -974,7 +959,7 @@ static inline void llvm_extract_bitvalue_from_array(GenContext *c, BEValue *be_v for (int i = start_byte; i <= end_byte; i++) { AlignSize alignment; - LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, llvm_get_type(c, be_value->type), + LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, type_char, (unsigned)i, be_value->alignment, &alignment); LLVMValueRef element = llvm_load(c, array_type, byte_ptr, alignment, ""); element = llvm_zext_trunc(c, element, llvm_member_type); @@ -1063,7 +1048,6 @@ static inline void llvm_extract_bitvalue(GenContext *c, BEValue *be_value, Decl static inline void llvm_emit_update_bitstruct_array(GenContext *c, LLVMValueRef array_ptr, AlignSize array_alignment, - LLVMTypeRef array_type, bool need_bitswap, Decl *member, LLVMValueRef value) @@ -1077,7 +1061,7 @@ static inline void llvm_emit_update_bitstruct_array(GenContext *c, ASSERT(start_bit == end_bit); value = llvm_emit_shl_fixed(c, value, start_bit % 8); AlignSize alignment; - LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, array_type, start_bit / 8, array_alignment, &alignment); + LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, type_char, start_bit / 8, array_alignment, &alignment); LLVMValueRef current = llvm_load(c, 
c->byte_type, byte_ptr, alignment, ""); LLVMValueRef bit = llvm_emit_shl_fixed(c, LLVMConstInt(c->byte_type, 1, 0), start_bit % 8); current = llvm_emit_and_raw(c, current, LLVMBuildNot(c->builder, bit, "")); @@ -1100,7 +1084,7 @@ static inline void llvm_emit_update_bitstruct_array(GenContext *c, for (int i = start_byte; i <= end_byte; i++) { AlignSize alignment; - LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, array_type, + LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, type_char, (unsigned)i, array_alignment, &alignment); if (i == start_byte && start_mod != 0) { @@ -1161,7 +1145,7 @@ static inline void llvm_emit_update_bitstruct_array(GenContext *c, static inline void llvm_emit_bitassign_array(GenContext *c, LLVMValueRef result, BEValue parent, Decl *parent_decl, Decl *member) { llvm_value_addr(c, &parent); - llvm_emit_update_bitstruct_array(c, parent.value, parent.alignment, llvm_get_type(c, parent.type), + llvm_emit_update_bitstruct_array(c, parent.value, parent.alignment, bitstruct_requires_bitswap(parent_decl), member, result); } @@ -1257,10 +1241,9 @@ static inline void llvm_emit_access_addr(GenContext *c, BEValue *be_value, Expr llvm_value_rvalue(c, be_value); if (!flat_type->decl->backend_ref) llvm_get_typeid(c, parent->type); ASSERT(member->backend_ref); - LLVMTypeRef value_type = llvm_get_type(c, type_get_array(member->type, vec_size(flat_type->decl->enums.values))); AlignSize align = LLVMGetAlignment(member->backend_ref); AlignSize alignment; - LLVMValueRef ptr = llvm_emit_array_gep_raw_index(c, member->backend_ref, value_type, be_value, align, &alignment); + LLVMValueRef ptr = llvm_emit_array_gep_raw_index(c, member->backend_ref, member->type, be_value, align, &alignment); llvm_value_set_address(c, be_value, ptr, member->type, alignment); return; } @@ -1325,7 +1308,7 @@ void llvm_new_phi(GenContext *c, BEValue *value, const char *name, Type *type, L } -static inline void llvm_emit_initialize_reference(GenContext *c, BEValue 
*value, Expr *expr); +static inline void llvm_emit_initialize_reference(GenContext *c, BEValue *ref, Expr *expr); // Prune the common occurrence where the optional is not used. @@ -1469,7 +1452,7 @@ static bool llvm_should_use_const_copy(ConstInitializer *const_init) } static void llvm_emit_const_init_ref(GenContext *c, BEValue *ref, ConstInitializer *const_init, bool top) { - if (const_init->type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(const_init->type->type_kind)) { LLVMValueRef val = llvm_emit_const_initializer(c, const_init, !top); llvm_store_raw(c, ref, val); @@ -1513,38 +1496,24 @@ static void llvm_emit_const_init_ref(GenContext *c, BEValue *ref, ConstInitializ UNREACHABLE_VOID case CONST_INIT_ARRAY_FULL: { - LLVMValueRef array_ref = ref->value; - Type *array_type = const_init->type; - Type *element_type = array_type->array.base; - ArrayIndex size = (ArrayIndex)array_type->array.len; - LLVMTypeRef array_type_llvm = llvm_get_type(c, array_type); + ArrayIndex size = (ArrayIndex)const_init->type->array.len; ASSERT(size <= UINT32_MAX); for (ArrayIndex i = 0; i < size; i++) { - AlignSize alignment; - LLVMValueRef array_pointer = llvm_emit_array_gep_raw(c, array_ref, array_type_llvm, (unsigned)i, ref->alignment, &alignment); - BEValue value; - llvm_value_set_address(c, &value, array_pointer, element_type, alignment); + BEValue value = llvm_emit_array_gep(c, ref, i); llvm_emit_const_init_ref(c, &value, const_init->init_array_full[i], false); } return; } case CONST_INIT_ARRAY: { - LLVMValueRef array_ref = ref->value; llvm_store_zero(c, ref); - Type *array_type = const_init->type; - Type *element_type = array_type->array.base; - LLVMTypeRef array_type_llvm = llvm_get_type(c, array_type); ConstInitializer **elements = const_init->init_array.elements; FOREACH(ConstInitializer *, element, elements) { ASSERT(element->kind == CONST_INIT_ARRAY_VALUE); ArrayIndex element_index = element->init_array_value.index; - AlignSize alignment; - LLVMValueRef 
array_pointer = llvm_emit_array_gep_raw(c, array_ref, array_type_llvm, (unsigned)element_index, ref->alignment, &alignment); - BEValue value; - llvm_value_set_address(c, &value, array_pointer, element_type, alignment); + BEValue value = llvm_emit_array_gep(c, ref, element_index); llvm_emit_const_init_ref(c, &value, element->init_array_value.element, false); } return; @@ -1602,7 +1571,6 @@ static inline void llvm_emit_initialize_reference_vector(GenContext *c, BEValue INLINE void llvm_emit_initialize_reference_bitstruct_array(GenContext *c, BEValue *ref, Decl *bitstruct, Expr** elements) { - LLVMTypeRef type = llvm_get_type(c, ref->type); bool is_bitswap = bitstruct_requires_bitswap(bitstruct); llvm_value_addr(c, ref); llvm_store_zero(c, ref); @@ -1614,7 +1582,7 @@ INLINE void llvm_emit_initialize_reference_bitstruct_array(GenContext *c, BEValu Decl *member = bitstruct->strukt.members[i]; BEValue val; llvm_emit_expr(c, &val, init); - llvm_emit_update_bitstruct_array(c, array_ptr, alignment, type, is_bitswap, member, + llvm_emit_update_bitstruct_array(c, array_ptr, alignment, is_bitswap, member, llvm_load_value_store(c, &val)); } } @@ -1660,7 +1628,7 @@ static inline void llvm_emit_initialize_reference_list(GenContext *c, BEValue *r // Getting ready to initialize, get the real type. 
Type *real_type = type_lowering(ref->type); - if (real_type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(real_type->type_kind)) { llvm_emit_initialize_reference_vector(c, ref, real_type, elements); return; @@ -1670,7 +1638,6 @@ static inline void llvm_emit_initialize_reference_list(GenContext *c, BEValue *r llvm_value_addr(c, ref); LLVMValueRef value = ref->value; - LLVMTypeRef llvm_type = llvm_get_type(c, real_type); bool is_struct = type_is_union_or_strukt(real_type); bool is_array = real_type->type_kind == TYPE_ARRAY; @@ -1685,9 +1652,7 @@ static inline void llvm_emit_initialize_reference_list(GenContext *c, BEValue *r else if (is_array) { REMINDER("Optimize array reference list init"); - AlignSize alignment; - LLVMValueRef ptr = llvm_emit_array_gep_raw(c, value, llvm_type, i, ref->alignment, &alignment); - llvm_value_set_address(c, &pointer, ptr, element->type, alignment); + pointer = llvm_emit_array_gep(c, ref, i); } else { @@ -1723,13 +1688,10 @@ static void llvm_emit_initialize_designated_const_range(GenContext *c, BEValue * llvm_emit_expr(c, &emitted_local, expr); emitted_value = &emitted_local; } - LLVMTypeRef ref_type = llvm_get_type(c, ref->type); + ASSERT(type_is_arraylike(ref->type)); for (ArrayIndex i = curr->index; i <= curr->index_end; i++) { - BEValue new_ref; - AlignSize alignment; - LLVMValueRef ptr = llvm_emit_array_gep_raw(c, ref->value, ref_type, (unsigned)i, ref->alignment, &alignment); - llvm_value_set_address(c, &new_ref, ptr, type_get_indexed_type(ref->type), alignment); + BEValue new_ref = llvm_emit_array_gep(c, ref, i); llvm_emit_initialize_designated_element(c, &new_ref, offset, current + 1, last, expr, emitted_value); } } @@ -1794,7 +1756,7 @@ static void llvm_emit_initialize_designated_element(GenContext *c, BEValue *ref, bool is_bitswap = bitstruct_requires_bitswap(type->decl); if (underlying_type->type_kind == TYPE_ARRAY) { - llvm_emit_update_bitstruct_array(c, value.value, value.alignment, bitstruct_type, is_bitswap, 
member, val); + llvm_emit_update_bitstruct_array(c, value.value, value.alignment, is_bitswap, member, val); break; } LLVMValueRef current_val = llvm_load_value(c, &value); @@ -1809,9 +1771,7 @@ static void llvm_emit_initialize_designated_element(GenContext *c, BEValue *ref, { Type *type = ref->type->array.base; offset += (unsigned)curr->index * type_size(type); - AlignSize alignment; - LLVMValueRef ptr = llvm_emit_array_gep_raw(c, ref->value, llvm_get_type(c, ref->type), (unsigned)curr->index, ref->alignment, &alignment); - llvm_value_set_address(c, &value, ptr, type, alignment); + value = llvm_emit_array_gep(c, ref, curr->index); llvm_emit_initialize_designated_element(c, &value, offset, current + 1, last, expr, emitted_value); break; } @@ -1825,7 +1785,6 @@ static void llvm_emit_initialize_designated_element(GenContext *c, BEValue *ref, static inline void llvm_emit_initialize_reference_designated_bitstruct_array(GenContext *c, BEValue *ref, Decl *bitstruct, Expr **elements, Expr *splat) { - LLVMTypeRef type = llvm_get_type(c, ref->type); bool is_bitswap = bitstruct_requires_bitswap(bitstruct); llvm_value_addr(c, ref); if (splat) @@ -1849,7 +1808,7 @@ static inline void llvm_emit_initialize_reference_designated_bitstruct_array(Gen Decl *member = bitstruct->strukt.members[element->index]; BEValue val; llvm_emit_expr(c, &val, designator->designator_expr.value); - llvm_emit_update_bitstruct_array(c, array_ptr, alignment, type, is_bitswap, member, llvm_load_value_store(c, &val)); + llvm_emit_update_bitstruct_array(c, array_ptr, alignment, is_bitswap, member, llvm_load_value_store(c, &val)); } } @@ -2118,7 +2077,7 @@ static inline LLVMValueRef llvm_emit_inc_dec_value(GenContext *c, SourceSpan spa { // Use byte here, we don't need a big offset. LLVMValueRef add = LLVMConstInt(diff < 0 ? 
llvm_get_type(c, type_isz) : llvm_get_type(c, type_usz), (unsigned long long)diff, diff < 0); - return llvm_emit_pointer_gep_raw(c, llvm_get_pointee_type(c, type), original->value, add); + return llvm_emit_pointer_gep_raw(c, original->value, add, type_size(type->pointer)); } case ALL_FLOATS: { @@ -2148,7 +2107,7 @@ static inline LLVMValueRef llvm_emit_inc_dec_value(GenContext *c, SourceSpan spa ? llvm_emit_add_int(c, original->type, original->value, diff_value, span) : llvm_emit_sub_int(c, original->type, original->value, diff_value, span); } - case TYPE_VECTOR: + case VECTORS: { Type *element = type_lowering(type->array.base); LLVMValueRef diff_value; @@ -2253,7 +2212,6 @@ static void llvm_emit_vec_comp(GenContext *c, BEValue *result, BEValue *lhs, BEV } } llvm_value_set(result, res, type); - return; } static inline void llvm_emit_inc_dec_change(GenContext *c, BEValue *addr, BEValue *after, BEValue *before, @@ -2279,7 +2237,7 @@ static inline bool expr_is_vector_subscript(Expr *expr) { if (expr->expr_kind != EXPR_SUBSCRIPT) return false; Type *type = type_lowering(exprptr(expr->subscript_expr.expr)->type); - return type->type_kind == TYPE_VECTOR; + return type_kind_is_real_vector(type->type_kind); } /** @@ -2331,7 +2289,7 @@ static inline void llvm_emit_pre_post_inc_dec_vector(GenContext *c, BEValue *val // But we also want the value (of the full vector) llvm_value_rvalue(c, value); Type *vec = value->type; - ASSERT(vec->type_kind == TYPE_VECTOR); + ASSERT(type_kind_is_real_vector(vec->type_kind)); Type *element = vec->array.base; LLVMValueRef vector = value->value; @@ -2511,7 +2469,7 @@ static void llvm_emit_unary_expr(GenContext *c, BEValue *value, Expr *expr) { llvm_value = LLVMBuildICmp(c->builder, LLVMIntEQ, value->value, llvm_get_zero(c, type), "not"); } - Type *res_type = type_get_vector_bool(type); + Type *res_type = type_get_vector_bool(type, TYPE_SIMD_VECTOR); llvm_value = LLVMBuildSExt(c->builder, llvm_value, llvm_get_type(c, res_type), ""); 
llvm_value_set(value, llvm_value, res_type); return; @@ -2654,24 +2612,20 @@ static void llvm_emit_trap_invalid_shift(GenContext *c, LLVMValueRef value, Type LLVMValueRef equal_or_greater = LLVMBuildICmp(c->builder, LLVMIntSGE, flat_max, max, "shift_exceeds"); llvm_emit_panic_on_true(c, equal_or_greater, "Invalid shift", loc, error, &val, NULL); return; - } - else + unsigned type_bit_size = type_size(type) * 8; + LLVMValueRef max = llvm_const_int(c, type, type_bit_size); + if (type_is_unsigned(type)) { - unsigned type_bit_size = type_size(type) * 8; - LLVMValueRef max = llvm_const_int(c, type, type_bit_size); - if (type_is_unsigned(type)) - { - LLVMValueRef equal_or_greater = LLVMBuildICmp(c->builder, LLVMIntUGE, value, max, "shift_exceeds"); - llvm_emit_panic_on_true(c, equal_or_greater, "Invalid shift", loc, error, &val, NULL); - return; - } - LLVMValueRef zero = llvm_const_int(c, type, 0); - LLVMValueRef negative = LLVMBuildICmp(c->builder, LLVMIntSLT, value, zero, "shift_underflow"); - llvm_emit_panic_on_true(c, negative, "Invalid shift", loc, error, &val, NULL); - LLVMValueRef equal_or_greater = LLVMBuildICmp(c->builder, LLVMIntSGE, value, max, "shift_exceeds"); + LLVMValueRef equal_or_greater = LLVMBuildICmp(c->builder, LLVMIntUGE, value, max, "shift_exceeds"); llvm_emit_panic_on_true(c, equal_or_greater, "Invalid shift", loc, error, &val, NULL); + return; } + LLVMValueRef zero = llvm_const_int(c, type, 0); + LLVMValueRef negative = LLVMBuildICmp(c->builder, LLVMIntSLT, value, zero, "shift_underflow"); + llvm_emit_panic_on_true(c, negative, "Invalid shift", loc, error, &val, NULL); + LLVMValueRef equal_or_greater = LLVMBuildICmp(c->builder, LLVMIntSGE, value, max, "shift_exceeds"); + llvm_emit_panic_on_true(c, equal_or_greater, "Invalid shift", loc, error, &val, NULL); } static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_ref, BEValue *start_ref, BEValue *end_ref, bool *is_exclusive) @@ -2706,7 +2660,7 @@ static void 
llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r break; case TYPE_FLEXIBLE_ARRAY: case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: parent_base = parent_addr; break; default: @@ -2754,7 +2708,7 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r llvm_value_set(&len, llvm_emit_extract_value(c, parent_load_value, 1), start_type); break; case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: llvm_value_set_int(c, &len, start_type, parent_type->array.len); break; default: @@ -2901,18 +2855,18 @@ static void gencontext_emit_slice(GenContext *c, BEValue *be_value, Expr *expr) { case TYPE_FLEXIBLE_ARRAY: case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: { // Move pointer AlignSize alignment; - start_pointer = llvm_emit_array_gep_raw_index(c, parent.value, llvm_get_type(c, parent.type), &start, type_abi_alignment(parent.type), &alignment); + start_pointer = llvm_emit_array_gep_raw_index(c, parent.value, type->array.base, &start, type_abi_alignment(parent.type), &alignment); break; } case TYPE_SLICE: - start_pointer = llvm_emit_pointer_inbounds_gep_raw(c, llvm_get_type(c, parent.type->array.base), parent.value, start.value); + start_pointer = llvm_emit_pointer_inbounds_gep_raw(c, parent.value, start.value, type_size(type->array.base)); break; case TYPE_POINTER: - start_pointer = llvm_emit_pointer_inbounds_gep_raw(c, llvm_get_pointee_type(c, parent.type), parent.value, start.value); + start_pointer = llvm_emit_pointer_inbounds_gep_raw(c, parent.value, start.value, type_size(type->pointer)); break; default: UNREACHABLE_VOID @@ -3424,7 +3378,7 @@ static void llvm_emit_struct_comparison(GenContext *c, BEValue *result, BEValue static inline LLVMValueRef llvm_emit_mult_int(GenContext *c, Type *type, LLVMValueRef left, LLVMValueRef right, SourceSpan loc) { - if (compiler.build.feature.trap_on_wrap && type->type_kind != TYPE_VECTOR) + if (compiler.build.feature.trap_on_wrap && !type_kind_is_real_vector(type->type_kind)) { 
LLVMTypeRef type_to_use = llvm_get_type(c, type); LLVMValueRef args[2] = { left, right }; @@ -3452,7 +3406,7 @@ static void llvm_emit_slice_comp(GenContext *c, BEValue *be_value, BEValue *lhs, Type *array_base_type = type_lowering(lhs->type->array.base); Type *array_base_pointer = type_get_ptr(array_base_type); - LLVMTypeRef llvm_base_type = llvm_get_type(c, array_base_type); + ByteSize array_base_size = type_size(array_base_type); LLVMBasicBlockRef exit = llvm_basic_block_new(c, "slice_cmp_exit"); LLVMBasicBlockRef value_cmp = llvm_basic_block_new(c, "slice_cmp_values"); @@ -3477,7 +3431,7 @@ static void llvm_emit_slice_comp(GenContext *c, BEValue *be_value, BEValue *lhs, llvm_emit_block(c, value_cmp); BEValue index_var; - llvm_value_set_address_abi_aligned(c, &index_var, llvm_emit_alloca_aligned(c, type_isz, "cmp.idx"), type_isz); + llvm_value_set_alloca(c, &index_var, type_isz, type_alloca_alignment(type_isz), "cmp.idx"); LLVMValueRef one = llvm_const_int(c, type_isz, 1); llvm_store_raw(c, &index_var, llvm_get_zero(c, type_isz)); llvm_emit_br(c, loop_begin); @@ -3496,16 +3450,15 @@ static void llvm_emit_slice_comp(GenContext *c, BEValue *be_value, BEValue *lhs, llvm_value_set_address_abi_aligned(c, &lhs_to_compare, llvm_emit_pointer_inbounds_gep_raw(c, - llvm_base_type, lhs_value.value, - current_index.value), array_base_type); + current_index.value, array_base_size), array_base_type); llvm_value_set_address_abi_aligned(c, &rhs_to_compare, llvm_emit_pointer_inbounds_gep_raw(c, - llvm_base_type, rhs_value.value, - current_index.value), array_base_type); + current_index.value, array_base_size), array_base_type); llvm_emit_comp(c, &cmp, &lhs_to_compare, &rhs_to_compare, BINARYOP_EQ); + LLVMBasicBlockRef match_fail_block = c->current_block; llvm_store_raw(c, &index_var, LLVMBuildAdd(c->builder, current_index.value, one, "")); llvm_emit_cond_br(c, &cmp, loop_begin, exit); @@ -3621,8 +3574,8 @@ MEMCMP: llvm_emit_memcmp(c, be_value, lhs->value, rhs->value, 
llvm_const_int(c, type_usz, type_size(lhs->type))); llvm_emit_int_comp_zero(c, be_value, be_value, binary_op); return; - case TYPE_VECTOR: - if (is_power_of_two(array_base->array.len)) goto MEMCMP; + case VECTORS: + if (is_power_of_two(array_base->array.len) && !type_flat_is_floatlike(array_base->array.base)) goto MEMCMP; break; case TYPE_UNION: case TYPE_STRUCT: @@ -3654,7 +3607,6 @@ MEMCMP: bool want_match = binary_op == BINARYOP_EQ; ArraySize len = lhs->type->array.len; Type *array_base_type = type_lowering(array_base); - LLVMTypeRef array_type = llvm_get_type(c, lhs->type); if (should_inline_array_comp(len, array_base_type)) { if (array_base_type == type_bool) @@ -3679,14 +3631,8 @@ MEMCMP: for (unsigned i = 0; i < len; i++) { value_block[i] = failure; - AlignSize align_lhs; - BEValue lhs_v; - LLVMValueRef lhs_ptr = llvm_emit_array_gep_raw(c, lhs->value, array_type, i, lhs->alignment, &align_lhs); - llvm_value_set_address(c, &lhs_v, lhs_ptr, array_base_type, align_lhs); - AlignSize align_rhs; - BEValue rhs_v; - LLVMValueRef rhs_ptr = llvm_emit_array_gep_raw(c, rhs->value, array_type, i, rhs->alignment, &align_rhs); - llvm_value_set_address(c, &rhs_v, rhs_ptr, array_base_type, align_rhs); + BEValue lhs_v = llvm_emit_array_gep(c, lhs, i); + BEValue rhs_v = llvm_emit_array_gep(c, rhs, i); BEValue comp; llvm_emit_comp(c, &comp, &lhs_v, &rhs_v, BINARYOP_EQ); blocks[i] = c->current_block; @@ -3711,23 +3657,16 @@ MEMCMP: LLVMValueRef len_val = llvm_const_int(c, type_isz, len); LLVMValueRef one = llvm_const_int(c, type_isz, 1); - BEValue index_var; - llvm_value_set_address_abi_aligned(c, &index_var, llvm_emit_alloca_aligned(c, type_isz, "cmp.idx"), type_isz); + BEValue index_var = llvm_emit_alloca_b(c, type_isz, "cmp.idx"); llvm_store_raw(c, &index_var, llvm_get_zero(c, type_isz)); llvm_emit_br(c, loop_begin); llvm_emit_block(c, loop_begin); - AlignSize align_lhs; - BEValue lhs_v; BEValue index_copy = index_var; llvm_value_rvalue(c, &index_copy); - LLVMValueRef lhs_ptr 
= llvm_emit_array_gep_raw_index(c, lhs->value, array_type, &index_copy, lhs->alignment, &align_lhs); - llvm_value_set_address(c, &lhs_v, lhs_ptr, array_base_type, align_lhs); - AlignSize align_rhs; - BEValue rhs_v; - LLVMValueRef rhs_ptr = llvm_emit_array_gep_raw_index(c, rhs->value, array_type, &index_copy, rhs->alignment, &align_rhs); - llvm_value_set_address(c, &rhs_v, rhs_ptr, array_base_type, align_rhs); + BEValue lhs_v = llvm_emit_array_gep_index(c, lhs, &index_copy); + BEValue rhs_v = llvm_emit_array_gep_index(c, rhs, &index_copy); BEValue comp; llvm_emit_comp(c, &comp, &lhs_v, &rhs_v, BINARYOP_EQ); LLVMBasicBlockRef loop_begin_phi = c->current_block; @@ -3853,7 +3792,7 @@ void llvm_emit_comp(GenContext *c, BEValue *result, BEValue *lhs, BEValue *rhs, case TYPE_SLICE: llvm_emit_slice_comp(c, result, lhs, rhs, binary_op); return; - case TYPE_VECTOR: + case VECTORS: if (type_is_float(type_vector_type(lhs->type))) { llvm_emit_float_comp(c, result, lhs, rhs, binary_op, lhs->type); @@ -4159,9 +4098,9 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs llvm_emit_vec_comp(c, be_value, &lhs, &rhs, binary_op, expr->type); return; } - Type *lhs_type = lhs.type; - Type *rhs_type = rhs.type; - Type *vector_type = lhs_type->type_kind == TYPE_VECTOR ? lhs_type->array.base : NULL; + LoweredType *lhs_type = lhs.type; + LoweredType *rhs_type = rhs.type; + Type *vector_type = type_kind_is_real_vector(lhs_type->type_kind) ? 
lhs_type->array.base : NULL; bool is_float = type_is_float(lhs_type) || (vector_type && type_is_float(vector_type)); LLVMValueRef val = NULL; LLVMValueRef lhs_value = lhs.value; @@ -4183,7 +4122,7 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs { Type *element_type = lhs_type->array.base->pointer; unsigned len = LLVMGetVectorSize(LLVMTypeOf(lhs_value)); - LLVMTypeRef int_vec_type = llvm_get_type(c, type_get_vector(type_isz, len)); + LLVMTypeRef int_vec_type = llvm_get_type(c, type_get_vector_from_vector(type_isz, lhs_type)); if (lhs_type == rhs_type) { val = LLVMBuildSub(c->builder, LLVMBuildPtrToInt(c->builder, lhs_value, int_vec_type, ""), @@ -4193,7 +4132,7 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs break; } rhs_value = LLVMBuildNeg(c->builder, rhs_value, ""); - val = llvm_emit_pointer_gep_raw(c, llvm_get_type(c, element_type), lhs_value, rhs_value); + val = llvm_emit_pointer_gep_raw(c, lhs_value, rhs_value, type_size(element_type)); break; } if (lhs_type->type_kind == TYPE_POINTER) @@ -4207,7 +4146,7 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs break; } rhs_value = LLVMBuildNeg(c->builder, rhs_value, ""); - val = llvm_emit_pointer_gep_raw(c, llvm_get_pointee_type(c, lhs_type), lhs_value, rhs_value); + val = llvm_emit_pointer_gep_raw(c, lhs_value, rhs_value, type_size(lhs_type->pointer)); break; } if (is_float) @@ -4221,13 +4160,13 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs if (type_is_pointer_vector(lhs_type)) { Type *element_type = lhs_type->array.base->pointer; - val = llvm_emit_pointer_gep_raw(c, llvm_get_type(c, element_type), lhs_value, rhs_value); + val = llvm_emit_pointer_gep_raw(c, lhs_value, rhs_value, type_size(element_type)); break; } if (lhs_type->type_kind == TYPE_POINTER) { ASSERT(type_is_integer(rhs_type)); - val = llvm_emit_pointer_gep_raw(c, llvm_get_pointee_type(c, lhs_type), lhs_value, 
rhs_value); + val = llvm_emit_pointer_gep_raw(c, lhs_value, rhs_value, type_size(lhs_type->pointer)); break; } if (is_float) @@ -4423,9 +4362,9 @@ static inline void llvm_emit_rethrow_expr(GenContext *c, BEValue *be_value, Expr LLVMBasicBlockRef no_err_block = llvm_basic_block_new(c, "noerr_block"); // Set the catch/error var - LLVMValueRef error_var = llvm_emit_alloca_aligned(c, type_fault, "error_var"); + BEValue error_var_ref = llvm_emit_alloca_b(c, type_fault, "error_var"); - PUSH_CATCH_VAR_BLOCK(error_var, guard_block); + PUSH_CATCH_VAR_BLOCK(error_var_ref.value, guard_block); llvm_emit_expr(c, be_value, expr->rethrow_expr.inner); @@ -4444,23 +4383,21 @@ static inline void llvm_emit_rethrow_expr(GenContext *c, BEValue *be_value, Expr // Ensure we are on a branch that is non-empty. if (llvm_emit_check_block_branch(c)) { - PUSH_DEFER_ERROR(error_var); + PUSH_DEFER_ERROR(error_var_ref.value); llvm_emit_statement_chain(c, expr->rethrow_expr.cleanup); POP_DEFER_ERROR(); - BEValue value; - llvm_value_set_address_abi_aligned(c, &value, error_var, type_fault); if (expr->rethrow_expr.in_block) { BlockExit *exit = *expr->rethrow_expr.in_block; if (exit->block_error_var) { - llvm_store_to_ptr(c, exit->block_error_var, &value); + llvm_store_to_ptr(c, exit->block_error_var, &error_var_ref); } llvm_emit_br(c, exit->block_optional_exit); } else { - llvm_emit_return_abi(c, NULL, &value); + llvm_emit_return_abi(c, NULL, &error_var_ref); } } @@ -4478,9 +4415,9 @@ static inline void llvm_emit_force_unwrap_expr(GenContext *c, BEValue *be_value, LLVMBasicBlockRef no_err_block = llvm_basic_block_new(c, "noerr_block"); // Set the catch/error var - LLVMValueRef error_var = llvm_emit_alloca_aligned(c, type_fault, "error_var"); + BEValue error_var_ref = llvm_emit_alloca_b(c, type_fault, "error_var"); - PUSH_CATCH_VAR_BLOCK(error_var, panic_block); + PUSH_CATCH_VAR_BLOCK(error_var_ref.value, panic_block); llvm_emit_expr(c, be_value, expr->inner_expr); llvm_value_fold_optional(c, 
be_value); @@ -4502,10 +4439,8 @@ static inline void llvm_emit_force_unwrap_expr(GenContext *c, BEValue *be_value, // TODO, we should add info about the error. SourceSpan loc = expr->span; BEValue *varargs = NULL; - BEValue fault_arg; - llvm_value_set_address(c, &fault_arg, error_var, type_fault, type_abi_alignment(type_fault)); - llvm_emit_any_from_value(c, &fault_arg, type_fault); - vec_add(varargs, fault_arg); + llvm_emit_any_from_value(c, &error_var_ref, type_fault); + vec_add(varargs, error_var_ref); llvm_emit_panic(c, "Force unwrap failed!", loc, "Unexpected fault '%s' was unwrapped!", varargs); } llvm_emit_block(c, no_err_block); @@ -4749,7 +4684,7 @@ static inline void llvm_emit_const_initializer_list_expr(GenContext *c, BEValue llvm_value_set(value, llvm_emit_const_initializer(c, expr->const_expr.initializer, false), expr->type); return; } - llvm_value_set_address_abi_aligned(c, value, llvm_emit_alloca_aligned(c, expr->type, "literal"), expr->type); + *value = llvm_emit_alloca_b(c, expr->type, "literal"); llvm_emit_const_initialize_reference(c, value, expr); } @@ -4798,7 +4733,7 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) llvm_value_set(be_value, llvm_get_zero(c, type), type); return; } - else + else // NOLINT { ConstInitializer *init = expr->const_expr.slice_init; if (llvm_is_global_eval(c) || type_flat_is_vector(expr->type) || type_flatten(expr->type)->type_kind == TYPE_BITSTRUCT) @@ -4817,9 +4752,9 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) else { ASSERT(type_is_arraylike(init->type)); - llvm_value_set_address_abi_aligned(c, be_value, llvm_emit_alloca_aligned(c, init->type, "literal"), init->type); - llvm_emit_const_init_ref(c, be_value, init, true); - LLVMValueRef val = llvm_emit_aggregate_two(c, type, be_value->value, llvm_const_int(c, type_usz, init->type->array.len)); + BEValue literal = llvm_emit_alloca_b(c, init->type, "literal"); + llvm_emit_const_init_ref(c, &literal, 
init, true); + LLVMValueRef val = llvm_emit_aggregate_two(c, type, literal.value, llvm_const_int(c, type_usz, init->type->array.len)); llvm_value_set(be_value, val, type); } } @@ -4843,7 +4778,7 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) case CONST_STRING: { Type *str_type = type_lowering(expr->type); - bool is_array = str_type->type_kind == TYPE_ARRAY || (str_type->type_kind == TYPE_VECTOR && type_size(type->array.base) == 1); + bool is_array = str_type->type_kind == TYPE_ARRAY || (type_kind_is_real_vector(str_type->type_kind) && type_size(type->array.base) == 1); if (is_array && llvm_is_global_eval(c)) { // In the global alloc case, create the byte array. @@ -4940,11 +4875,11 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) static void llvm_expand_array_to_args(GenContext *c, Type *param_type, LLVMValueRef expand_ptr, LLVMValueRef *args, unsigned *arg_count_ref, AlignSize alignment) { - LLVMTypeRef array_type = llvm_get_type(c, param_type); + Type *element = param_type->array.base; for (ByteSize i = 0; i < param_type->array.len; i++) { AlignSize load_align; - LLVMValueRef element_ptr = llvm_emit_array_gep_raw(c, expand_ptr, array_type, (unsigned)i, alignment, &load_align); + LLVMValueRef element_ptr = llvm_emit_array_gep_raw(c, expand_ptr, element, (unsigned)i, alignment, &load_align); llvm_expand_type_to_args(c, param_type->array.base, element_ptr, args, arg_count_ref, load_align); } } @@ -4992,9 +4927,11 @@ static void llvm_expand_type_to_args(GenContext *context, Type *param_type, LLVM case TYPE_ARRAY: llvm_expand_array_to_args(context, param_type, expand_ptr, args, arg_count_ref, alignment); break; + case TYPE_VECTOR: + UNREACHABLE_VOID; case TYPE_UNION: case TYPE_SLICE: - case TYPE_VECTOR: + case TYPE_SIMD_VECTOR: case TYPE_ANY: TODO break; @@ -5027,26 +4964,42 @@ LLVMValueRef llvm_emit_struct_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMTypeR return 
llvm_emit_const_ptradd_inbounds_raw(c, ptr, offset); } +BEValue llvm_emit_array_gep_index(GenContext *c, BEValue *parent, BEValue *index) +{ + ASSERT(llvm_value_is_addr(parent)); + Type *element = type_lowering(parent->type->array.base); + AlignSize alignment; + LLVMValueRef ptr = llvm_emit_array_gep_raw_index(c, parent->value, element, index, parent->alignment, &alignment); + return (BEValue) { .value = ptr, .type = element, .kind = BE_ADDRESS, .alignment = alignment }; +} -LLVMValueRef llvm_emit_array_gep_raw_index(GenContext *c, LLVMValueRef ptr, LLVMTypeRef array_type, BEValue *index, AlignSize array_alignment, AlignSize *alignment) +LLVMValueRef llvm_emit_array_gep_raw_index(GenContext *c, LLVMValueRef ptr, Type *element_type, BEValue *index, AlignSize array_alignment, AlignSize *alignment) { LLVMValueRef index_val = llvm_load_value(c, index); - LLVMTypeRef element_type = LLVMGetElementType(array_type); Type *index_type = index->type; ASSERT(type_is_integer(index_type)); if (type_is_unsigned(index_type) && type_size(index_type) < type_size(type_usz)) { index_val = llvm_zext_trunc(c, index_val, llvm_get_type(c, type_usz)); } - *alignment = type_min_alignment(llvm_abi_size(c, element_type), array_alignment); - return llvm_emit_pointer_inbounds_gep_raw(c, element_type, ptr, index_val); + ByteSize size = type_size(element_type); + *alignment = type_min_alignment(size, array_alignment); + return llvm_emit_pointer_inbounds_gep_raw(c, ptr, index_val, size); } -LLVMValueRef llvm_emit_array_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMTypeRef array_type, unsigned index, AlignSize array_alignment, AlignSize *alignment) +BEValue llvm_emit_array_gep(GenContext *c, BEValue *parent, ArrayIndex index) +{ + ASSERT(llvm_value_is_addr(parent)); + BEValue index_value; + llvm_value_set(&index_value, llvm_const_int(c, type_usz, index), type_usz); + return llvm_emit_array_gep_index(c, parent, &index_value); +} + +LLVMValueRef llvm_emit_array_gep_raw(GenContext *c, LLVMValueRef ptr, 
Type *element_type, unsigned index, AlignSize array_alignment, AlignSize *alignment) { BEValue index_value; llvm_value_set(&index_value, llvm_const_int(c, type_usz, index), type_usz); - return llvm_emit_array_gep_raw_index(c, ptr, array_type, &index_value, array_alignment, alignment); + return llvm_emit_array_gep_raw_index(c, ptr, element_type, &index_value, array_alignment, alignment); } LLVMValueRef llvm_emit_ptradd_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize mult) @@ -5092,9 +5045,8 @@ LLVMValueRef llvm_emit_const_vector(LLVMValueRef value, ArraySize len) } -LLVMValueRef llvm_ptr_mult(GenContext *c, LLVMValueRef offset, LLVMTypeRef pointee_type) +static LLVMValueRef llvm_ptr_mult(GenContext *c, LLVMValueRef offset, ByteSize size) { - ByteSize size = llvm_abi_size(c, pointee_type); if (size == 1) return offset; LLVMTypeRef offset_type = LLVMTypeOf(offset); @@ -5109,22 +5061,23 @@ LLVMValueRef llvm_ptr_mult(GenContext *c, LLVMValueRef offset, LLVMTypeRef point } return LLVMBuildMul(c->builder, offset, mult, ""); } -LLVMValueRef llvm_emit_pointer_gep_raw(GenContext *c, LLVMTypeRef pointee_type, LLVMValueRef ptr, LLVMValueRef offset) +LLVMValueRef llvm_emit_pointer_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize element_size) { if (LLVMIsConstant(offset)) { - return llvm_emit_ptradd_raw(c, ptr, llvm_ptr_mult(c, offset, pointee_type), 1); + return llvm_emit_ptradd_raw(c, ptr, llvm_ptr_mult(c, offset, element_size), 1); } - return llvm_emit_ptradd_raw(c, ptr, offset, llvm_abi_size(c, pointee_type)); + return llvm_emit_ptradd_raw(c, ptr, offset, element_size); } -LLVMValueRef llvm_emit_pointer_inbounds_gep_raw(GenContext *c, LLVMTypeRef pointee_type, LLVMValueRef ptr, LLVMValueRef offset) + +LLVMValueRef llvm_emit_pointer_inbounds_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize size) { if (LLVMIsConstant(offset)) { - return llvm_emit_ptradd_inbounds_raw(c, ptr, llvm_ptr_mult(c, offset, pointee_type), 
1); + return llvm_emit_ptradd_inbounds_raw(c, ptr, llvm_ptr_mult(c, offset, size), 1); } - return llvm_emit_ptradd_inbounds_raw(c, ptr, offset, llvm_abi_size(c, pointee_type)); + return llvm_emit_ptradd_inbounds_raw(c, ptr, offset, size); } LLVMValueRef llvm_emit_const_ptradd_inbounds_raw(GenContext *c, LLVMValueRef ptr, ByteSize offset) @@ -5162,15 +5115,15 @@ NEXT:; llvm_value_set(pointer, ptr, ptr_type); } -static void llvm_emit_any_pointer(GenContext *c, BEValue *value, BEValue *pointer) +static void llvm_emit_any_pointer(GenContext *c, BEValue *any, BEValue *pointer) { - llvm_value_fold_optional(c, value); - if (value->kind == BE_ADDRESS) + llvm_value_fold_optional(c, any); + if (any->kind == BE_ADDRESS) { - llvm_emit_struct_gep_ref(c, value, pointer, type_voidptr, 0); + llvm_emit_struct_gep_ref(c, any, pointer, type_voidptr, 0); return; } - LLVMValueRef ptr = llvm_emit_extract_value(c, value->value, 0); + LLVMValueRef ptr = llvm_emit_extract_value(c, any->value, 0); llvm_value_set(pointer, ptr, type_voidptr); } @@ -5222,9 +5175,9 @@ void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ args[(*arg_count_ref)++] = be_value->value; return; } - LLVMValueRef indirect = llvm_emit_alloca(c, llvm_get_type(c, type), info->indirect.alignment, "indirectarg"); - llvm_store_to_ptr_aligned(c, indirect, be_value, info->indirect.alignment); - args[(*arg_count_ref)++] = indirect; + BEValue indirect = llvm_emit_alloca_b_realign(c, type, info->indirect.alignment, "indirectarg"); + llvm_store(c, &indirect, be_value); + args[(*arg_count_ref)++] = indirect.value; return; } case ABI_ARG_DIRECT: @@ -5349,7 +5302,7 @@ void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ // Move this to an address (if needed) llvm_value_addr(c, be_value); llvm_expand_type_to_args(c, type, be_value->value, args, arg_count_ref, be_value->alignment); - return; + return; // NOLINT } } } @@ -5475,8 +5428,8 @@ void llvm_emit_raw_call(GenContext *c, 
BEValue *result_value, FunctionPrototype // 15a. Create memory to hold the return type. // COERCE UPDATE bitcast removed, check for ways to optimize - LLVMValueRef addr = llvm_emit_alloca_aligned(c, call_return_type, ""); - llvm_value_set_address_abi_aligned(c, result_value, addr, call_return_type); + *result_value = llvm_emit_alloca_b(c, call_return_type, ""); + LLVMValueRef addr = result_value->value; // Store lower AlignSize align = result_value->alignment; @@ -5691,8 +5644,8 @@ static LLVMValueRef llvm_emit_dynamic_search(GenContext *c, LLVMValueRef type_id LLVMDisposeBuilder(builder); } // Insert cache. - LLVMValueRef cache_fn_ptr = llvm_emit_alloca_aligned(c, type_voidptr, ".inlinecache"); - LLVMValueRef cache_type_id_ptr = llvm_emit_alloca_aligned(c, type_voidptr, ".cachedtype"); + BEValue cache_fn_ptr = llvm_emit_alloca_b(c, type_voidptr, ".inlinecache"); + BEValue cache_type_id_ptr = llvm_emit_alloca_b(c, type_voidptr, ".cachedtype"); LLVMBasicBlockRef current_block = LLVMGetInsertBlock(c->builder); LLVMValueRef next_after_alloca = LLVMGetNextInstruction(c->alloca_point); if (next_after_alloca) @@ -5703,23 +5656,23 @@ static LLVMValueRef llvm_emit_dynamic_search(GenContext *c, LLVMValueRef type_id { LLVMPositionBuilderAtEnd(c->builder, LLVMGetInstructionParent(c->alloca_point)); } - llvm_store_to_ptr_zero(c, cache_type_id_ptr, type_voidptr); + llvm_store_zero(c, &cache_type_id_ptr); LLVMPositionBuilderAtEnd(c->builder, current_block); LLVMBasicBlockRef cache_miss = llvm_basic_block_new(c, "cache_miss"); LLVMBasicBlockRef cache_hit = llvm_basic_block_new(c, "cache_hit"); LLVMBasicBlockRef exit = llvm_basic_block_new(c, ""); - LLVMValueRef cached_type_id = llvm_load_abi_alignment(c, type_voidptr, cache_type_id_ptr, "type"); + LLVMValueRef cached_type_id = llvm_load_value(c, &cache_type_id_ptr); LLVMValueRef compare = LLVMBuildICmp(c->builder, LLVMIntEQ, type_id_ptr, cached_type_id, ""); llvm_emit_cond_br_raw(c, compare, cache_hit, cache_miss); 
llvm_emit_block(c, cache_miss); LLVMValueRef params[2] = { type_id_ptr, selector }; LLVMValueRef call = LLVMBuildCall2(c->builder, type, func, params, 2, ""); // Store in cache. - llvm_store_to_ptr_raw(c, cache_fn_ptr, call, type_voidptr); - llvm_store_to_ptr_raw(c, cache_type_id_ptr, type_id_ptr, type_voidptr); + llvm_store_raw(c, &cache_fn_ptr, call); + llvm_store_raw(c, &cache_type_id_ptr, type_id_ptr); llvm_emit_br(c, exit); llvm_emit_block(c, cache_hit); - LLVMValueRef cached_val = llvm_load_abi_alignment(c, type_voidptr, cache_fn_ptr, "cache_hit_fn"); + LLVMValueRef cached_val = llvm_load_value(c, &cache_fn_ptr); llvm_emit_br(c, exit); llvm_emit_block(c, exit); LLVMValueRef phi = LLVMBuildPhi(c->builder, c->ptr_type, "fn_phi"); @@ -5790,10 +5743,7 @@ INLINE void llvm_emit_call_invocation(GenContext *c, BEValue *result_value, sret_return = true; break; } - llvm_value_set_address(c, - result_value, - llvm_emit_alloca(c, llvm_get_type(c, call_return_type), alignment, "sretparam"), - call_return_type, alignment); + *result_value = llvm_emit_alloca_b_realign(c, call_return_type, alignment, "sretparam"); // 6c. Add the pointer to the list of arguments. arg_values[arg_count++] = result_value->value; @@ -5820,7 +5770,8 @@ INLINE void llvm_emit_call_invocation(GenContext *c, BEValue *result_value, // 7b. Create the address to hold the return. Type *actual_return_type_ptr = abi_args[0]->original_type; Type *actual_return_type = actual_return_type_ptr->pointer; - llvm_value_set(&synthetic_return_param, llvm_emit_alloca_aligned(c, actual_return_type, "retparam"), actual_return_type_ptr); + BEValue retparam = llvm_emit_alloca_b(c, actual_return_type, "retparam"); + llvm_value_set(&synthetic_return_param, retparam.value, actual_return_type_ptr); // 7c. Emit it as a parameter as a pointer (will implicitly add it to the value list) llvm_emit_parameter(c, arg_values, &arg_count, abi_args[0], &synthetic_return_param); // 7d. Update the be_value to actually be an address. 
@@ -5885,23 +5836,15 @@ INLINE void llvm_emit_varargs_expr(GenContext *c, BEValue *value_ref, Expr **var Type *pointee_type = param->array.base; unsigned elements = vec_size(varargs); Type *array = type_get_array(pointee_type, elements); - LLVMTypeRef llvm_array_type = llvm_get_type(c, array); - AlignSize alignment = type_alloca_alignment(array); - LLVMValueRef array_ref = llvm_emit_alloca(c, llvm_array_type, alignment, varargslots_name); + BEValue array_ref = llvm_emit_alloca_b(c, array, varargslots_name); FOREACH_IDX(foreach_index, Expr *, val, varargs) { llvm_emit_expr(c, &inner_temp, val); llvm_value_fold_optional(c, &inner_temp); - AlignSize store_alignment; - LLVMValueRef slot = llvm_emit_array_gep_raw(c, - array_ref, - llvm_array_type, - foreach_index, - alignment, - &store_alignment); - llvm_store_to_ptr_aligned(c, slot, &inner_temp, store_alignment); + BEValue slot = llvm_emit_array_gep(c, &array_ref, foreach_index); + llvm_store(c, &slot, &inner_temp); } - llvm_value_aggregate_two(c, value_ref, param, array_ref, llvm_const_int(c, type_usz, elements)); + llvm_value_aggregate_two(c, value_ref, param, array_ref.value, llvm_const_int(c, type_usz, elements)); LLVMSetValueName2(value_ref->value, temp_name, 6); } @@ -6184,7 +6127,7 @@ static inline void llvm_emit_return_block(GenContext *c, BEValue *be_value, Type if (type_lowered != type_void) { - exit.block_return_out = llvm_emit_alloca_aligned(c, type_lowered, "blockret"); + exit.block_return_out = llvm_emit_alloca_b(c, type_lowered, "blockret").value; } c->catch.fault = NULL; c->catch.block = NULL; @@ -6464,7 +6407,7 @@ static inline void llvm_emit_initializer_list_expr(GenContext *c, BEValue *value return; } ASSERT(!IS_OPTIONAL(expr) || c->catch.block); - llvm_value_set_address_abi_aligned(c, value, llvm_emit_alloca_aligned(c, type, "literal"), type); + *value = llvm_emit_alloca_b(c, type, "literal"); llvm_emit_initialize_reference(c, value, expr); } @@ -6548,8 +6491,7 @@ void 
llvm_emit_catch_unwrap(GenContext *c, BEValue *value, Expr *expr) } else { - LLVMValueRef temp_err = llvm_emit_alloca_aligned(c, type_fault, "temp_err"); - llvm_value_set_address_abi_aligned(c, &addr, temp_err, type_fault); + addr = llvm_emit_alloca_b(c, type_fault, "temp_err"); } LLVMBasicBlockRef catch_block = llvm_basic_block_new(c, "end_block"); @@ -6731,28 +6673,25 @@ void llvm_emit_try_unwrap_chain(GenContext *c, BEValue *value, Expr *expr) ASSERT(llvm_value_is_bool(value)); return; } - else + for (unsigned i = 0; i < elements; i++) { - for (unsigned i = 0; i < elements; i++) + if (next_block) { - if (next_block) - { - llvm_emit_br(c, next_block); - llvm_emit_block(c, next_block); - } - next_block = llvm_basic_block_new(c, "chain_next"); - Expr *link = exprs[i]; - BEValue res; - llvm_emit_expr(c, &res, link); - llvm_value_rvalue(c, &res); - ASSERT(llvm_value_is_bool(&res)); - llvm_emit_cond_br(c, &res, next_block, fail_block); + llvm_emit_br(c, next_block); + llvm_emit_block(c, next_block); } - llvm_emit_block(c, next_block); - llvm_emit_br(c, end_block); - llvm_emit_block(c, fail_block); - llvm_emit_br(c, end_block); + next_block = llvm_basic_block_new(c, "chain_next"); + Expr *link = exprs[i]; + BEValue res; + llvm_emit_expr(c, &res, link); + llvm_value_rvalue(c, &res); + ASSERT(llvm_value_is_bool(&res)); + llvm_emit_cond_br(c, &res, next_block, fail_block); } + llvm_emit_block(c, next_block); + llvm_emit_br(c, end_block); + llvm_emit_block(c, fail_block); + llvm_emit_br(c, end_block); // Finally set up our phi llvm_emit_block(c, end_block); @@ -6798,9 +6737,8 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex (void)inner_type; ASSERT(inner_type->type_kind == TYPE_ANYFAULT); llvm_value_rvalue(c, be_value); - LLVMValueRef val = llvm_emit_alloca_aligned(c, type_chars, "faultname_zero"); - BEValue zero; - llvm_value_set_address_abi_aligned(c, &zero, val, type_chars); + + BEValue zero = llvm_emit_alloca_b(c, type_chars, 
"faultname_zero"); LLVMBasicBlockRef exit_block = llvm_basic_block_new(c, "faultname_exit"); LLVMBasicBlockRef zero_block = llvm_basic_block_new(c, "faultname_no"); LLVMBasicBlockRef ok_block = llvm_basic_block_new(c, "faultname_ok"); @@ -6824,13 +6762,12 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex Type *inner_type = type_no_optional(inner->type)->canonical; ASSERT(inner_type->canonical->type_kind == TYPE_ENUM); llvm_value_rvalue(c, be_value); - LLVMTypeRef slice = llvm_get_type(c, type_chars); LLVMValueRef to_introspect = LLVMBuildIntToPtr(c->builder, llvm_get_typeid(c, inner_type), c->ptr_type, ""); LLVMValueRef ptr = LLVMBuildStructGEP2(c->builder, c->introspect_type, to_introspect, INTROSPECT_INDEX_ADDITIONAL, ""); LLVMValueRef val = llvm_zext_trunc(c, be_value->value, c->size_type); llvm_value_set_address(c, be_value, - llvm_emit_pointer_gep_raw(c, slice, ptr, val), type_chars, llvm_abi_alignment(c, slice)); + llvm_emit_pointer_gep_raw(c, ptr, val, type_size(type_chars)), type_chars, type_abi_alignment(type_chars)); return; } case ACCESS_TYPEOFANYFAULT: @@ -7149,10 +7086,9 @@ void llvm_emit_slice_to_vec_array(GenContext *c, BEValue *value, Expr *expr) llvm_value_rvalue(c, &pointer); Type *to_type = type_lowering(expr->type); LLVMTypeRef type = llvm_get_type(c, to_type); - AlignSize alignment = llvm_abi_alignment(c, type); - LLVMValueRef temp = llvm_emit_alloca(c, type, alignment, ".temp"); - llvm_emit_memcpy(c, temp, alignment, pointer.value, element_alignment, llvm_abi_size(c, type)); - llvm_value_set_address(c, value, temp, to_type, alignment); + BEValue temp = llvm_emit_alloca_b(c, to_type, ".temp"); + llvm_emit_memcpy(c, temp.value, temp.alignment, pointer.value, element_alignment, llvm_abi_size(c, type)); + *value = temp; } static inline void llvm_emit_make_slice(GenContext *c, BEValue *value, Expr *expr) diff --git a/src/compiler/llvm_codegen_function.c b/src/compiler/llvm_codegen_function.c index 
0a1e852a8..bace67f7b 100644 --- a/src/compiler/llvm_codegen_function.c +++ b/src/compiler/llvm_codegen_function.c @@ -77,13 +77,12 @@ static void llvm_expand_from_args(GenContext *c, Type *type, LLVMValueRef ref, u switch (type->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: { - LLVMTypeRef array_type = llvm_get_type(c, type); for (unsigned i = 0; i < type->array.len; i++) { AlignSize element_align; - LLVMValueRef target = llvm_emit_array_gep_raw(c, ref, array_type, i, alignment, &element_align); + LLVMValueRef target = llvm_emit_array_gep_raw(c, ref, type->array.base, i, alignment, &element_align); llvm_expand_from_args(c, type->array.base, target, index, element_align); } break; @@ -164,7 +163,7 @@ static inline void llvm_process_parameter_value_inner(GenContext *c, Decl *decl, llvm_store_to_ptr_raw_aligned(c, addr, llvm_get_next_param(c, index), decl_alignment); // Calculate the address - addr = llvm_emit_pointer_inbounds_gep_raw(c, hi, addr, llvm_const_int(c, type_usz, hi_offset / hi_aligned_size)); + addr = llvm_emit_pointer_inbounds_gep_raw(c, addr, llvm_const_int(c, type_usz, hi_offset / hi_aligned_size), llvm_abi_size(c, hi)); // Store it in the hi location llvm_store_to_ptr_raw_aligned(c, addr, llvm_get_next_param(c, index), type_min_alignment(decl_alignment, hi_offset)); diff --git a/src/compiler/llvm_codegen_internal.h b/src/compiler/llvm_codegen_internal.h index 12f20b9df..11b0f4ee9 100644 --- a/src/compiler/llvm_codegen_internal.h +++ b/src/compiler/llvm_codegen_internal.h @@ -335,6 +335,7 @@ void llvm_value_deref(GenContext *c, BEValue *value); void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type); void llvm_value_set_int(GenContext *c, BEValue *value, Type *type, uint64_t i); void llvm_value_set_address(GenContext *c, BEValue *value, LLVMValueRef llvm_value, Type *type, AlignSize alignment); +void llvm_value_set_alloca(GenContext *c, BEValue *value, Type *type, AlignSize align, const char *name); void 
llvm_value_set_address_abi_aligned(GenContext *c, BEValue *value, LLVMValueRef llvm_value, Type *type); void llvm_value_set_decl_address(GenContext *c, BEValue *value, Decl *decl); void llvm_value_set_decl(GenContext *c, BEValue *value, Decl *decl); @@ -388,8 +389,9 @@ void llvm_add_global_decl(GenContext *c, Decl *decl); void llvm_emit_global_variable_init(GenContext *c, Decl *decl); // -- Alloca -- +BEValue llvm_emit_alloca_b(GenContext *c, Type *type, const char *name); +BEValue llvm_emit_alloca_b_realign(GenContext *c, Type *type, AlignSize alignment, const char *name); LLVMValueRef llvm_emit_alloca(GenContext *c, LLVMTypeRef type, unsigned alignment, const char *name); -LLVMValueRef llvm_emit_alloca_aligned(GenContext *c, Type *type, const char *name); void llvm_emit_and_set_decl_alloca(GenContext *c, Decl *decl); INLINE void llvm_set_alignment(LLVMValueRef alloca, AlignSize alignment); INLINE AlignSize llvm_type_or_alloca_align(LLVMValueRef dest, Type *type); @@ -477,13 +479,15 @@ LLVMValueRef llvm_emit_const_vector(LLVMValueRef value, ArraySize len); LLVMValueRef llvm_emit_const_vector_pot(LLVMValueRef value, ArraySize len); LLVMValueRef llvm_emit_struct_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMTypeRef struct_type, unsigned index, unsigned struct_alignment, AlignSize *alignment); -LLVMValueRef llvm_emit_array_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMTypeRef array_type, unsigned index, AlignSize array_alignment, AlignSize *alignment); -LLVMValueRef llvm_emit_array_gep_raw_index(GenContext *c, LLVMValueRef ptr, LLVMTypeRef array_type, BEValue *index, AlignSize array_alignment, AlignSize *alignment); -LLVMValueRef llvm_emit_pointer_gep_raw(GenContext *c, LLVMTypeRef pointee_type, LLVMValueRef ptr, LLVMValueRef offset); +LLVMValueRef llvm_emit_array_gep_raw(GenContext *c, LLVMValueRef ptr, Type *element_type, unsigned index, AlignSize array_alignment, AlignSize *alignment); +BEValue llvm_emit_array_gep_index(GenContext *c, BEValue *parent, BEValue 
*index); +BEValue llvm_emit_array_gep(GenContext *c, BEValue *parent, ArrayIndex index); +LLVMValueRef llvm_emit_array_gep_raw_index(GenContext *c, LLVMValueRef ptr, Type *element_type, BEValue *index, AlignSize array_alignment, AlignSize *alignment); +LLVMValueRef llvm_emit_pointer_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize element_size); LLVMValueRef llvm_emit_ptradd_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize mult); LLVMValueRef llvm_emit_ptradd_inbounds_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize mult); LLVMValueRef llvm_emit_const_ptradd_inbounds_raw(GenContext *c, LLVMValueRef ptr, ByteSize offset); -LLVMValueRef llvm_emit_pointer_inbounds_gep_raw(GenContext *c, LLVMTypeRef pointee_type, LLVMValueRef ptr, LLVMValueRef offset); +LLVMValueRef llvm_emit_pointer_inbounds_gep_raw(GenContext *c, LLVMValueRef ptr, LLVMValueRef offset, ByteSize size); LLVMTypeRef llvm_coerce_expand_hi_offset(GenContext *c, LLVMValueRef *addr, ABIArgInfo *info, AlignSize *align); void llvm_emit_ptr_from_array(GenContext *c, BEValue *value); void llvm_emit_struct_member_ref(GenContext *c, BEValue *struct_ref, BEValue *member_ref, unsigned member_id); diff --git a/src/compiler/llvm_codegen_internal_impl.h b/src/compiler/llvm_codegen_internal_impl.h index e89795c81..b68520596 100644 --- a/src/compiler/llvm_codegen_internal_impl.h +++ b/src/compiler/llvm_codegen_internal_impl.h @@ -48,7 +48,7 @@ INLINE bool type_is_intlike(Type *type) { type = type_flatten(type); if (type_is_integer_or_bool_kind(type)) return true; - if (type->type_kind != TYPE_VECTOR) return false; + if (!type_kind_is_real_vector(type->type_kind)) return false; type = type->array.base; return type_is_integer_or_bool_kind(type); } @@ -108,7 +108,7 @@ INLINE AlignSize llvm_type_or_alloca_align(LLVMValueRef dest, Type *type) { return LLVMGetAlignment(dest); } - return type_abi_alignment(type); + return type_alloca_alignment(type); } INLINE 
LLVMValueRef llvm_store_to_ptr(GenContext *c, LLVMValueRef destination, BEValue *value) diff --git a/src/compiler/llvm_codegen_stmt.c b/src/compiler/llvm_codegen_stmt.c index 18d104c5c..0e8b0341d 100644 --- a/src/compiler/llvm_codegen_stmt.c +++ b/src/compiler/llvm_codegen_stmt.c @@ -113,9 +113,8 @@ void llvm_emit_local_decl(GenContext *c, Decl *decl, BEValue *value) // Create a local alloca ASSERT(!decl->backend_ref); - Type *type_low = type_lowering(decl->type); if (decl->var.is_temp && !IS_OPTIONAL(decl) && !decl->var.is_addr && !decl->var.is_written && !type_is_user_defined( - type_low) && type_low->type_kind != TYPE_ARRAY) + var_type) && var_type->type_kind != TYPE_ARRAY) { ASSERT(decl->var.init_expr); llvm_emit_expr(c, value, decl->var.init_expr); @@ -133,7 +132,7 @@ void llvm_emit_local_decl(GenContext *c, Decl *decl, BEValue *value) scratch_buffer_clear(); scratch_buffer_append(decl->name ? decl->name : "anon"); scratch_buffer_append(".f"); - decl->var.optional_ref = llvm_emit_alloca_aligned(c, type_fault, scratch_buffer_to_string()); + decl->var.optional_ref = llvm_emit_alloca_b(c, type_fault, scratch_buffer_to_string()).value; // Only clear out the result if the assignment isn't an optional. 
} @@ -234,9 +233,9 @@ static inline void llvm_emit_return(GenContext *c, Ast *ast) if (ast->return_stmt.cleanup_fail) { llvm_value_rvalue(c, &be_value); - LLVMValueRef error_out = llvm_emit_alloca_aligned(c, type_fault, "reterr"); - llvm_store_to_ptr(c, error_out, &be_value); - PUSH_DEFER_ERROR(error_out); + BEValue error_out_ref = llvm_emit_alloca_b(c, type_fault, "reterr"); + llvm_store(c, &error_out_ref, &be_value); + PUSH_DEFER_ERROR(error_out_ref.value); llvm_emit_statement_chain(c, ast->return_stmt.cleanup_fail); POP_DEFER_ERROR(); } @@ -247,12 +246,12 @@ static inline void llvm_emit_return(GenContext *c, Ast *ast) PUSH_CATCH(); LLVMBasicBlockRef error_return_block = NULL; - LLVMValueRef error_out = NULL; + BEValue error_out_ref = { .value = NULL }; if (c->cur_func.prototype && c->cur_func.prototype->ret_rewrite != RET_NORMAL) { error_return_block = llvm_basic_block_new(c, "err_retblock"); - error_out = llvm_emit_alloca_aligned(c, type_fault, "reterr"); - c->catch = (OptionalCatch) { error_out, error_return_block }; + error_out_ref = llvm_emit_alloca_b(c, type_fault, "reterr"); + c->catch = (OptionalCatch) { error_out_ref.value, error_return_block }; } bool has_return_value = ast->return_stmt.expr != NULL; @@ -272,9 +271,9 @@ static inline void llvm_emit_return(GenContext *c, Ast *ast) { if (llvm_temp_as_address(return_value.type)) { - LLVMValueRef temp = llvm_emit_alloca_aligned(c, return_value.type, "ret$temp"); - llvm_store_to_ptr(c, temp, &return_value); - llvm_value_set_address_abi_aligned(c, &return_value, temp, return_value.type); + BEValue temp = llvm_emit_alloca_b(c, return_value.type, "ret$temp"); + llvm_store(c, &temp, &return_value); + return_value = temp; } else { @@ -299,12 +298,10 @@ static inline void llvm_emit_return(GenContext *c, Ast *ast) if (error_return_block && LLVMGetFirstUse(LLVMBasicBlockAsValue(error_return_block))) { llvm_emit_block(c, error_return_block); - PUSH_DEFER_ERROR(error_out); + PUSH_DEFER_ERROR(error_out_ref.value); 
llvm_emit_statement_chain(c, ast->return_stmt.cleanup_fail); POP_DEFER_ERROR(); - BEValue value; - llvm_value_set_address_abi_aligned(c, &value, error_out, type_fault); - llvm_emit_return_abi(c, NULL, &value); + llvm_emit_return_abi(c, NULL, &error_out_ref); } } @@ -769,8 +766,7 @@ static LLVMValueRef llvm_emit_switch_jump_stmt(GenContext *c, c->current_block = NULL; llvm_emit_block(c, switch_block); AlignSize align; - LLVMTypeRef type = LLVMArrayType(c->ptr_type, count); - LLVMValueRef index = llvm_emit_array_gep_raw_index(c, jump_table, type, switch_value, llvm_abi_alignment(c, type), &align); + LLVMValueRef index = llvm_emit_array_gep_raw_index(c, jump_table, type_voidptr, switch_value, type_abi_alignment(type_voidptr), &align); LLVMValueRef addr = llvm_load(c, c->ptr_type, index, align, "target"); LLVMValueRef instr = LLVMBuildIndirectBr(c->builder, addr, case_count); c->current_block = NULL; @@ -984,7 +980,7 @@ static void llvm_emit_switch_body(GenContext *c, BEValue *switch_value, Ast *swi } BEValue switch_var; - llvm_value_set_address_abi_aligned(c, &switch_var, llvm_emit_alloca_aligned(c, switch_type, "switch"), switch_type); + llvm_value_set_alloca(c, &switch_var, switch_type, type_alloca_alignment(switch_type), "switch"); switch_ast->switch_stmt.codegen.retry.var = &switch_var; llvm_store(c, &switch_var, switch_value); @@ -1500,7 +1496,7 @@ LLVMValueRef llvm_emit_zstring_named(GenContext *c, const char *str, const char LLVMSetGlobalConstant(global_string, 1); LLVMSetInitializer(global_string, llvm_get_zstring(c, str, len)); AlignSize alignment; - LLVMValueRef string = llvm_emit_array_gep_raw(c, global_string, char_array_type, 0, 1, &alignment); + LLVMValueRef string = llvm_emit_array_gep_raw(c, global_string, type_char, 0, 1, &alignment); ReusableConstant reuse = { .string = str_copy(str, len), .name = str_copy(extname, strlen(extname)), .value = string }; vec_add(c->reusable_constants, reuse); return string; @@ -1555,23 +1551,15 @@ void 
llvm_emit_panic(GenContext *c, const char *message, SourceSpan loc, const c unsigned elements = vec_size(varargs); Type *any_slice = type_get_slice(type_any); Type *any_array = type_get_array(type_any, elements); - LLVMTypeRef llvm_array_type = llvm_get_type(c, any_array); - AlignSize alignment = type_alloca_alignment(any_array); - LLVMValueRef array_ref = llvm_emit_alloca(c, llvm_array_type, alignment, varargslots_name); + BEValue array_ref = llvm_emit_alloca_b(c, any_array, varargslots_name); unsigned vacount = vec_size(varargs); for (unsigned i = 0; i < vacount; i++) { - AlignSize store_alignment; - LLVMValueRef slot = llvm_emit_array_gep_raw(c, - array_ref, - llvm_array_type, - i, - alignment, - &store_alignment); - llvm_store_to_ptr_aligned(c, slot, &varargs[i], store_alignment); + BEValue slot = llvm_emit_array_gep(c, &array_ref, i); + llvm_store(c, &slot, &varargs[i]); } BEValue value; - llvm_value_aggregate_two(c, &value, any_slice, array_ref, llvm_const_int(c, type_usz, elements)); + llvm_value_aggregate_two(c, &value, any_slice, array_ref.value, llvm_const_int(c, type_usz, elements)); LLVMSetValueName2(value.value, temp_name, 6); llvm_emit_parameter(c, actual_args, &count, abi_args[4], &value); diff --git a/src/compiler/llvm_codegen_storeload.c b/src/compiler/llvm_codegen_storeload.c index 6d45b8bfe..1eb710992 100644 --- a/src/compiler/llvm_codegen_storeload.c +++ b/src/compiler/llvm_codegen_storeload.c @@ -9,6 +9,11 @@ LLVMValueRef llvm_store_to_ptr_raw_aligned(GenContext *c, LLVMValueRef pointer, ASSERT(alignment > 0); LLVMTypeRef type = LLVMTypeOf(value); ASSERT(type != c->bool_type); + if (LLVMIsAAllocaInst(pointer) || LLVMIsAGlobalVariable(pointer)) + { + ASSERT(alignment <= LLVMGetAlignment(pointer)); + alignment = LLVMGetAlignment(pointer); + } if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) { unsigned len = LLVMGetVectorSize(LLVMTypeOf(value)); @@ -153,7 +158,7 @@ LLVMValueRef llvm_load_value_store(GenContext *c, BEValue *value) LLVMValueRef val 
= llvm_load_value(c, value); if (value->kind == BE_BOOLVECTOR) { - return LLVMBuildSExt(c->builder, val, llvm_get_type(c, type_get_vector_bool(value->type)), ""); + return LLVMBuildSExt(c->builder, val, llvm_get_type(c, type_get_vector_bool(value->type, TYPE_SIMD_VECTOR)), ""); } if (value->kind != BE_BOOLEAN) return val; return LLVMBuildZExt(c->builder, val, c->byte_type, ""); @@ -166,7 +171,7 @@ LLVMValueRef llvm_store_zero(GenContext *c, BEValue *ref) Type *type = ref->type; if (!type_is_aggregate(type) || type_is_builtin(type->type_kind)) { - if (type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(type->type_kind)) { unsigned len = type->array.len; if (!is_power_of_two(len)) @@ -201,13 +206,13 @@ LLVMValueRef llvm_store_zero(GenContext *c, BEValue *ref) } if (type->type_kind == TYPE_ARRAY) { - LLVMTypeRef array_type = llvm_get_type(c, type); + Type *base = type->array.base; for (unsigned i = 0; i < type->array.len; i++) { AlignSize align; - LLVMValueRef element_ptr = llvm_emit_array_gep_raw(c, ref->value, array_type, i, ref->alignment, &align); + LLVMValueRef element_ptr = llvm_emit_array_gep_raw(c, ref->value, base, i, ref->alignment, &align); BEValue be_value; - llvm_value_set_address(c, &be_value, element_ptr, type->array.base, align); + llvm_value_set_address(c, &be_value, element_ptr, base, align); llvm_store_zero(c, &be_value); } return NULL; diff --git a/src/compiler/llvm_codegen_type.c b/src/compiler/llvm_codegen_type.c index d9b8e0b44..2e45550ad 100644 --- a/src/compiler/llvm_codegen_type.c +++ b/src/compiler/llvm_codegen_type.c @@ -103,7 +103,7 @@ static void param_expand(GenContext *context, LLVMTypeRef** params_ref, Type *ty case TYPE_ALIAS: UNREACHABLE_VOID case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: for (ArraySize i = type->array.len; i > 0; i--) { param_expand(context, params_ref, type->array.base); @@ -274,7 +274,9 @@ LLVMTypeRef llvm_get_pointee_type(GenContext *c, Type *any_type) any_type = type_lowering(any_type); 
ASSERT(any_type->type_kind == TYPE_POINTER); if (any_type == type_voidptr) return llvm_get_type(c, type_char); - return llvm_get_type(c, any_type->pointer); + Type *pointee = any_type->pointer; +// ASSERT(type_flatten(pointee)->type_kind != TYPE_VECTOR); + return llvm_get_type(c, pointee); } bool llvm_types_are_similar(LLVMTypeRef original, LLVMTypeRef coerce) @@ -351,7 +353,7 @@ LLVMTypeRef llvm_get_type(GenContext *c, Type *any_type) LLVMStructSetBody(virtual_type, types, 2, false); return any_type->backend_type = virtual_type; } - case TYPE_VECTOR: + case VECTORS: return any_type->backend_type = LLVMVectorType(llvm_get_type(c, any_type->array.base), any_type->array.len); } UNREACHABLE; @@ -624,7 +626,7 @@ LLVMValueRef llvm_get_typeid(GenContext *c, Type *type) return llvm_generate_introspection_global(c, NULL, type, INTROSPECT_TYPE_OPTIONAL, type->optional, 0, NULL, false); case TYPE_FLEXIBLE_ARRAY: return llvm_generate_introspection_global(c, NULL, type, INTROSPECT_TYPE_ARRAY, type->array.base, 0, NULL, false); - case TYPE_VECTOR: + case VECTORS: return llvm_generate_introspection_global(c, NULL, type, INTROSPECT_TYPE_VECTOR, type->array.base, type->array.len, NULL, false); case TYPE_ARRAY: return llvm_generate_introspection_global(c, NULL, type, INTROSPECT_TYPE_ARRAY, type->array.base, type->array.len, NULL, false); diff --git a/src/compiler/llvm_codegen_value.c b/src/compiler/llvm_codegen_value.c index 8a17f9dbe..7035b11d9 100644 --- a/src/compiler/llvm_codegen_value.c +++ b/src/compiler/llvm_codegen_value.c @@ -42,6 +42,16 @@ void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type) } } +void llvm_value_set_alloca(GenContext *c, BEValue *value, Type *type, AlignSize align, const char *name) +{ + type = type_lowering(type); + *value = (BEValue) { + .value = llvm_emit_alloca(c, llvm_get_type(c, type), align, name), + .kind = BE_ADDRESS, + .type = type, + .alignment = align, + }; +} void llvm_value_set_address(GenContext *c, BEValue *value, 
LLVMValueRef llvm_value, Type *type, AlignSize alignment) { ASSERT(alignment > 0); @@ -74,9 +84,9 @@ void llvm_value_addr(GenContext *c, BEValue *value) } else { - LLVMValueRef temp = llvm_emit_alloca_aligned(c, value->type, "taddr"); - llvm_store_to_ptr(c, temp, value); - llvm_value_set_address_abi_aligned(c, value, temp, value->type); + BEValue temp = llvm_emit_alloca_b(c, value->type, "taddr"); + llvm_store(c, &temp, value); + *value = temp; } } diff --git a/src/compiler/number.c b/src/compiler/number.c index 33d534a04..a32f9a0b8 100644 --- a/src/compiler/number.c +++ b/src/compiler/number.c @@ -283,7 +283,7 @@ bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp is_eq = !memcmp(left->bytes.ptr, right->bytes.ptr, left->bytes.len); goto RETURN; case CONST_INITIALIZER: - if (left->initializer->type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(left->initializer->type->type_kind)) { ConstInitializer *lhs = left->initializer; ConstInitializer *rhs = right->initializer; diff --git a/src/compiler/parse_global.c b/src/compiler/parse_global.c index a5addec94..a498c4580 100644 --- a/src/compiler/parse_global.c +++ b/src/compiler/parse_global.c @@ -635,6 +635,16 @@ static inline TypeInfo *parse_vector_type_index(ParseContext *c, TypeInfo *type) ASSIGN_EXPR_OR_RET(vector->array.len, parse_expr(c), poisoned_type_info); CONSUME_OR_RET(TOKEN_RVEC, poisoned_type_info); } + if (tok_is(c, TOKEN_AT_IDENT)) + { + if (symstr(c) != kw_at_simd) + { + PRINT_ERROR_HERE("Only '@simd' is a valid attribute, found '%s'.", symstr(c)); + return poisoned_type_info; + } + advance(c); + vector->is_simd = true; + } RANGE_EXTEND_PREV(vector); return vector; } @@ -1952,14 +1962,9 @@ static inline Decl *parse_typedef_declaration(ParseContext *c) ASSIGN_EXPR_OR_RET(decl->distinct_align, parse_expr(c), poisoned_decl); CONSUME_OR_RET(TOKEN_RPAREN, poisoned_decl); } - else if (name == kw_at_simd) - { - advance_and_verify(c, TOKEN_AT_IDENT); - decl->attr_simd = 
true; - } else { - RETURN_PRINT_ERROR_HERE("Expected only attributes '@align' and '@simd'."); + RETURN_PRINT_ERROR_HERE("Expected only attribute '@align'."); } } RANGE_EXTEND_PREV(decl); diff --git a/src/compiler/sema_builtins.c b/src/compiler/sema_builtins.c index fd16c1ea3..7d88e1d34 100644 --- a/src/compiler/sema_builtins.c +++ b/src/compiler/sema_builtins.c @@ -124,16 +124,16 @@ static bool sema_check_builtin_args(SemaContext *context, Expr **args, BuiltinAr RETURN_SEMA_ERROR(arg, "Expected a floating point or floating point vector, but was %s.", type_quoted_error_string(type)); case BA_VEC: - if (type->type_kind == TYPE_VECTOR) continue; + if (type_kind_is_real_vector(type->type_kind)) continue; RETURN_SEMA_ERROR(arg, "Expected a vector."); case BA_PTRVEC: if (type_is_pointer_vector(type)) continue; RETURN_SEMA_ERROR(arg, "Expected a pointer vector."); case BA_NUMVEC: - if (type->type_kind == TYPE_VECTOR && type_is_number_or_bool(type->array.base)) continue; + if (type_kind_is_real_vector(type->type_kind) && type_is_number_or_bool(type->array.base)) continue; RETURN_SEMA_ERROR(arg, "Expected a numeric vector."); case BA_INTVEC: - if (type->type_kind == TYPE_VECTOR && type_flat_is_intlike(type->array.base)) continue; + if (type_kind_is_real_vector(type->type_kind) && type_flat_is_intlike(type->array.base)) continue; RETURN_SEMA_ERROR(arg, "Expected an integer vector."); case BA_BOOLINT: if (type_is_integer_or_bool_kind(type)) continue; @@ -142,10 +142,10 @@ static bool sema_check_builtin_args(SemaContext *context, Expr **args, BuiltinAr if (type_flat_is_bool_vector(type)) continue; RETURN_SEMA_ERROR(arg, "Expected a boolean vector."); case BA_BOOLINTVEC: - if (type->type_kind == TYPE_VECTOR && type_flat_is_boolintlike(type->array.base)) continue; + if (type_kind_is_real_vector(type->type_kind) && type_flat_is_boolintlike(type->array.base)) continue; RETURN_SEMA_ERROR(arg, "Expected a boolean or integer vector."); case BA_FLOATVEC: - if (type->type_kind == 
TYPE_VECTOR && type_flat_is_floatlike(type->array.base)) continue; + if (type_kind_is_real_vector(type->type_kind) && type_flat_is_floatlike(type->array.base)) continue; RETURN_SEMA_ERROR(arg, "Expected an float vector."); case BA_INTLIKE: if (type_flat_is_intlike(type)) continue; @@ -184,7 +184,8 @@ static inline bool sema_expr_analyse_swizzle(SemaContext *context, Expr *expr, b // Ensure matching types if (swizzle_two && !sema_check_builtin_args_match(context, args, 2)) return false; - unsigned components = type_flatten(args[0]->type)->array.len; + Type *flat = type_flatten(args[0]->type); + unsigned components = flat->array.len; if (swizzle_two) components *= 2; for (unsigned i = first_mask_value; i < arg_count; i++) { @@ -200,7 +201,7 @@ static inline bool sema_expr_analyse_swizzle(SemaContext *context, Expr *expr, b RETURN_SEMA_ERROR(mask_val, "The swizzle position must be in the range 0-%d.", components - 1); } } - expr->type = type_add_optional(type_get_vector(type_get_indexed_type(args[0]->type), arg_count - first_mask_value), optional); + expr->type = type_add_optional(type_get_vector(type_get_indexed_type(args[0]->type), flat->type_kind, arg_count - first_mask_value), optional); return true; } @@ -723,8 +724,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) ASSERT(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_NUMVEC, BA_NUMVEC}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; - Type *vec_type = type_flatten(args[0]->type); - rtype = type_get_vector(type_bool, vec_type->array.len); + rtype = type_get_vector_from_vector(type_bool, type_flatten(args[0]->type)); expr->expr_kind = EXPR_BINARY; expr->binary_expr = (ExprBinary) { .left = exprid(args[0]), @@ -754,6 +754,12 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) type_to_error_string(type_get_ptr(args[0]->type)), type_to_error_string(args[2]->type)); } + Type *flat_0 = 
type_flatten(args[0]->type); + if (type_kind_is_real_vector(flat_0->type_kind)) + { + rtype = type_get_vector_from_vector(type_bool, flat_0); + break; + } rtype = type_bool; break; case BUILTIN_EXACT_ADD: @@ -861,7 +867,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) { RETURN_SEMA_ERROR(args[4], "Expected inner * outer col to equal %d.", vec_len2); } - rtype = type_get_vector(flat1->array.base, i128_mult(args[2]->const_expr.ixx.i, args[4]->const_expr.ixx.i).low); + rtype = type_get_vector(flat1->array.base, flat1->type_kind, i128_mult(args[2]->const_expr.ixx.i, args[4]->const_expr.ixx.i).low); break; case BUILTIN_SAT_SHL: case BUILTIN_SAT_SUB: @@ -1031,12 +1037,12 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) { RETURN_SEMA_ERROR(args[2], "Expected the vector to be %s, not %s.", type_quoted_error_string( - type_get_vector(pointer_type->pointer, len)), + type_get_vector(pointer_type->pointer, flat_pointer_vec->type_kind, len)), type_quoted_error_string(args[2]->type)); } if (!sema_check_alignment_expression(context, args[3])) return false; if (!sema_expr_is_valid_mask_for_value(context, args[1], args[2])) return false; - rtype = type_get_vector(pointer_type->pointer, len); + rtype = type_get_vector(pointer_type->pointer, flat_pointer_vec->type_kind, len); break; } case BUILTIN_SCATTER: @@ -1054,7 +1060,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) { RETURN_SEMA_ERROR(args[1], "Expected the vector to be %s, not %s.", type_quoted_error_string( - type_get_vector(pointer_type->pointer, flat_pointer_vec->array.len)), + type_get_vector_from_vector(pointer_type->pointer, flat_pointer_vec)), type_quoted_error_string(args[2]->type)); } if (!sema_check_alignment_expression(context, args[3])) return false; diff --git a/src/compiler/sema_casts.c b/src/compiler/sema_casts.c index d17b50eda..ac4e573c6 100644 --- a/src/compiler/sema_casts.c +++ b/src/compiler/sema_casts.c @@ -321,12 +321,12 @@ Type 
*type_infer_len_from_actual_type(Type *to_infer, Type *actual_type) return type_add_optional(type_get_array(indexed, type_flatten(actual_type)->array.len), is_optional); case TYPE_INFERRED_VECTOR: if (!type_is_arraylike(type_flatten(actual_type))) return to_infer; - return type_add_optional(type_get_vector(indexed, type_flatten(actual_type)->array.len), is_optional); + return type_add_optional(type_get_vector(indexed, TYPE_VECTOR, type_flatten(actual_type)->array.len), is_optional); case TYPE_SLICE: return type_add_optional(type_get_slice(indexed), is_optional); - case TYPE_VECTOR: + case VECTORS: // The case of int[*]*[<2>] x = ... - return type_add_optional(type_get_vector(indexed, to_infer->array.len), is_optional); + return type_add_optional(type_get_vector(indexed, to_infer->type_kind, to_infer->array.len), is_optional); default: UNREACHABLE } @@ -606,7 +606,7 @@ static void expr_recursively_rewrite_untyped_list(Expr *expr, Type *to_type) } switch (flat->type_kind) { - case TYPE_VECTOR: + case VECTORS: { Type *indexed = type_get_indexed_type(to_type); FOREACH(Expr *, e, values) @@ -1105,7 +1105,7 @@ static bool rule_arr_to_vec(CastContext *cc, bool is_explicit, bool is_silent) default: return sema_cast_error(cc, false, is_silent); } - cast_context_set_from(cc, type_get_vector(base, len)); + cast_context_set_from(cc, type_get_vector(base, cc->to->type_kind, len)); return cast_is_allowed(cc, is_explicit, is_silent); } @@ -1159,7 +1159,7 @@ static bool rule_slice_to_vecarr(CastContext *cc, bool is_explicit, bool is_sile { return report_cast_error(cc, false); } - cast_context_set_from(cc, type_get_vector(cc->from->array.base, size)); + cast_context_set_from(cc, type_get_vector(cc->from->array.base, cc->to->type_kind, size)); } return cast_is_allowed(cc, is_explicit, is_silent); } @@ -2409,14 +2409,13 @@ static void cast_arr_to_vec(Expr *expr, Type *to_type) { Type *index_vec = type_flatten(type_get_indexed_type(to_type)); Type *index_arr = 
type_flatten(type_get_indexed_type(expr->type)); - Type *to_temp = index_vec == index_arr ? to_type : type_get_vector(index_arr, type_flatten(expr->type)->array.len); + Type *to_temp = index_vec == index_arr ? to_type : type_get_vector(index_arr, to_type->canonical->type_kind, type_flatten(expr->type)->array.len); if (sema_cast_const(expr)) { // For the array -> vector this is always a simple rewrite of type. ASSERT(expr->const_expr.const_kind == CONST_INITIALIZER); ConstInitializer *list = expr->const_expr.initializer; list->type = type_flatten(to_temp); - list->is_simd = type_is_simd(to_temp); expr->type = to_temp; } else @@ -2630,13 +2629,14 @@ static ConvGroup group_from_type[TYPE_LAST + 1] = { [TYPE_WILDCARD] = CONV_WILDCARD, [TYPE_TYPEINFO] = CONV_NO, [TYPE_MEMBER] = CONV_NO, + [TYPE_SIMD_VECTOR] = CONV_VECTOR, }; INLINE ConvGroup type_to_group(Type *type) { type = type->canonical; if (type == type_voidptr) return CONV_VOIDPTR; - if (type->type_kind == TYPE_POINTER && (type->pointer->type_kind == TYPE_ARRAY || type->pointer->type_kind == TYPE_VECTOR)) return CONV_VAPTR; + if (type->type_kind == TYPE_POINTER && (type->pointer->type_kind == TYPE_ARRAY || type_kind_is_real_vector(type->pointer->canonical->type_kind))) return CONV_VAPTR; if (type_len_is_inferred(type)) return CONV_INFERRED; return group_from_type[type->type_kind]; } diff --git a/src/compiler/sema_const.c b/src/compiler/sema_const.c index 340123906..89b58c8bd 100644 --- a/src/compiler/sema_const.c +++ b/src/compiler/sema_const.c @@ -231,17 +231,17 @@ static bool sema_append_const_array_one(SemaContext *context, Expr *expr, Expr * } bool is_slice = list->const_expr.const_kind == CONST_SLICE; ASSERT(!type_is_inferred(array_type)); - bool is_vector = array_type->type_kind == TYPE_VECTOR; + bool is_vector = type_kind_is_real_vector(array_type->type_kind); ConstInitializer *init = is_slice ? 
list->const_expr.slice_init : list->const_expr.initializer; unsigned len = sema_len_from_const(list) + 1; Type *indexed = type_get_indexed_type(init->type); if (!cast_implicit(context, element, indexed, false)) return false; - Type *new_inner_type = is_vector ? type_get_vector(indexed, len) : type_get_array(indexed, len); + Type *new_inner_type = is_vector ? type_get_vector(indexed, array_type->type_kind, len) : type_get_array(indexed, len); Type *new_outer_type = list->type; if (!is_slice) { Type *outer_indexed = type_get_indexed_type(init->type); - new_outer_type = is_vector ? type_get_vector(outer_indexed, len) : type_get_array(outer_indexed, len); + new_outer_type = is_vector ? type_get_vector(outer_indexed, array_type->type_kind, len) : type_get_array(outer_indexed, len); } switch (init->kind) { @@ -307,7 +307,7 @@ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr * ASSERT_SPAN(concat_expr, concat_expr->resolve_status == RESOLVE_RUNNING); if (!sema_check_left_right_const(context, left, right)) return false; ArraySize len = 0; - bool use_array = true; + TypeKind vec_type = TYPE_POISONED; Type *indexed_type = NULL; Type *element_type = left->type->canonical; Type *right_type = right->type->canonical; @@ -331,8 +331,11 @@ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr * case CONST_INITIALIZER: switch (type_flatten(element_type)->type_kind) { + case TYPE_SIMD_VECTOR: + vec_type = TYPE_SIMD_VECTOR; + break; case TYPE_VECTOR: - use_array = false; + vec_type = TYPE_VECTOR; break; case TYPE_INFERRED_VECTOR: case TYPE_INFERRED_ARRAY: @@ -492,7 +495,7 @@ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr * { expr_rewrite_to_const_zero(concat_expr, type_get_slice(indexed_type)); } - Type *type = use_array ? type_get_array(indexed_type, len) : type_get_vector(indexed_type, len); + Type *type = vec_type == TYPE_POISONED ? 
type_get_array(indexed_type, len) : type_get_vector(indexed_type, vec_type, len); ConstInitializer *lhs_init = expr_const_initializer_from_expr(left); ConstInitializer *rhs_init = expr_const_initializer_from_expr(right); if (!rhs_init) diff --git a/src/compiler/sema_decls.c b/src/compiler/sema_decls.c index 43216a99f..be1401a18 100755 --- a/src/compiler/sema_decls.c +++ b/src/compiler/sema_decls.c @@ -360,7 +360,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) RETURN_SEMA_ERROR(member, "Flexible array members not allowed in unions."); } AlignSize member_alignment; - if (!sema_set_abi_alignment(context, member->type, &member_alignment, true)) return false; + if (!sema_set_alignment(context, member->type, &member_alignment, false)) return false; if (!sema_check_struct_holes(context, decl, member)) return false; ByteSize member_size = type_size(member->type); @@ -427,7 +427,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) return true; } -AlignSize sema_get_max_natural_alignment_as_member(Type *type) +AlignSize sema_get_max_natural_alignment(Type *type) { RETRY:; switch (type->type_kind) @@ -436,7 +436,6 @@ RETRY:; type = type->optional; goto RETRY; case TYPE_TYPEDEF: - if (type->decl->attr_simd) return type_abi_alignment(type); type = type->decl->distinct->type; goto RETRY; case TYPE_ALIAS: @@ -474,7 +473,7 @@ RETRY:; AlignSize max = 0; FOREACH(Decl *, member, type->decl->strukt.members) { - AlignSize member_max = sema_get_max_natural_alignment_as_member(member->type); + AlignSize member_max = sema_get_max_natural_alignment(member->type); if (member_max > max) max = member_max; } return max; @@ -482,6 +481,8 @@ RETRY:; case TYPE_BITSTRUCT: type = type->decl->strukt.container_type->type; goto RETRY; + case TYPE_SIMD_VECTOR: + return type_abi_alignment(type); case TYPE_ARRAY: case TYPE_FLEXIBLE_ARRAY: case TYPE_INFERRED_ARRAY: @@ -574,9 +575,9 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl 
*decl) SEMA_ERROR(member, "Recursive definition of %s.", type_quoted_error_string(member_type)); return decl_poison(decl); } - if (!sema_set_abi_alignment(context, member->type, &member_type_alignment, true)) return decl_poison(decl); + if (!sema_set_alignment(context, member->type, &member_type_alignment, false)) return decl_poison(decl); // And get the natural alignment - AlignSize member_natural_alignment = sema_get_max_natural_alignment_as_member(member->type); + AlignSize member_natural_alignment = sema_get_max_natural_alignment(member->type); // If packed, then the alignment is 1 AlignSize member_alignment = is_packed ? 1 : member_type_alignment; @@ -992,7 +993,7 @@ static bool sema_analyse_interface(SemaContext *context, Decl *decl, bool *erase first->var.kind = VARDECL_PARAM; first->unit = context->unit; first->resolve_status = RESOLVE_DONE; - first->alignment = type_abi_alignment(type_voidptr); + first->alignment = type_alloca_alignment(type_voidptr); vec_insert_first(method->func_decl.signature.params, first); method->unit = context->unit; method->func_decl.signature.vararg_index += 1; @@ -1043,6 +1044,7 @@ RETRY: case TYPE_BOOL: case ALL_INTS: case ALL_FLOATS: + case ALL_VECTORS: case TYPE_ANY: case TYPE_INTERFACE: case TYPE_ANYFAULT: @@ -1053,8 +1055,6 @@ RETRY: case TYPE_UNION: case TYPE_BITSTRUCT: case TYPE_TYPEDEF: - case TYPE_VECTOR: - case TYPE_INFERRED_VECTOR: case TYPE_UNTYPED_LIST: case TYPE_WILDCARD: case TYPE_TYPEINFO: @@ -1437,7 +1437,7 @@ static inline bool sema_analyse_signature(SemaContext *context, Signature *sig, { if (!sema_deep_resolve_function_ptr(context, type_info)) return false; param->type = type_info->type; - if (!sema_set_abi_alignment(context, param->type, ¶m->alignment, false)) return false; + if (!sema_set_alignment(context, param->type, ¶m->alignment, true)) return false; } if (param->var.init_expr) @@ -1585,15 +1585,6 @@ static inline bool sema_analyse_typedef(SemaContext *context, Decl *decl, bool * // Remove "alignment" 
if (default_size == decl->alignment) decl->distinct_align = NULL; } - if (decl->attr_simd) - { - if (decl->distinct_align) RETURN_SEMA_ERROR(decl, "You cannot set both @simd and @align on a distinct type."); - inner_type = inner_type->canonical; - if (inner_type->type_kind != TYPE_VECTOR) RETURN_SEMA_ERROR(decl, "You cannot set @simd on a non-vector type."); - ArraySize len = inner_type->array.len; - if (!is_power_of_two(len)) RETURN_SEMA_ERROR(decl, "The length of a @simd vector must be a power of two."); - decl->alignment = type_simd_alignment(inner_type); - } if (!decl->alignment) { decl->alignment = type_abi_alignment(inner_type); @@ -1742,7 +1733,7 @@ static inline bool sema_analyse_enum(SemaContext *context, Decl *decl, bool *era for (unsigned i = 0; i < associated_value_count; i++) { Decl *param = associated_values[i]; - if (!sema_set_abi_alignment(context, param->type, ¶m->alignment, false)) return false; + if (!sema_set_alignment(context, param->type, ¶m->alignment, true)) return false; param->resolve_status = RESOLVE_DONE; } for (unsigned i = 0; i < enums; i++) @@ -2891,8 +2882,7 @@ static inline bool sema_analyse_method(SemaContext *context, Decl *decl) goto NOT_VALID_NAME; } break; - case TYPE_VECTOR: - case TYPE_INFERRED_VECTOR: + case ALL_VECTORS: { unsigned len = strlen(decl->name); if (len <= 4) @@ -5465,8 +5455,7 @@ RETRY: case TYPE_SLICE: case TYPE_FLEXIBLE_ARRAY: case TYPE_INFERRED_ARRAY: - case TYPE_VECTOR: - case TYPE_INFERRED_VECTOR: + case ALL_VECTORS: type = type->array.base; goto RETRY; case TYPE_OPTIONAL: diff --git a/src/compiler/sema_expr.c b/src/compiler/sema_expr.c index 6ad22ecc0..e8849e34f 100644 --- a/src/compiler/sema_expr.c +++ b/src/compiler/sema_expr.c @@ -501,7 +501,7 @@ static inline bool expr_both_const_foldable(Expr *left, Expr *right, BinaryOp op case CONST_INITIALIZER: switch (type_flatten(left->type)->type_kind) { - case TYPE_VECTOR: + case VECTORS: if (a != b) return false; return op == BINARYOP_EQ || op == BINARYOP_NE; 
case TYPE_BITSTRUCT: @@ -534,7 +534,7 @@ static inline bool expr_both_any_integer_or_integer_bool_vector(Expr *left, Expr Type *flatten_right = type_flatten(right->type); if (type_is_integer(flatten_left) && type_is_integer(flatten_right)) return true; - if (flatten_left->type_kind != TYPE_VECTOR || flatten_right->type_kind != TYPE_VECTOR) return false; + if (!type_kind_is_real_vector(flatten_left->type_kind) || !type_kind_is_real_vector(flatten_right->type_kind)) return false; return type_is_integer_or_bool_kind(flatten_left->array.base) && type_is_integer_or_bool_kind(flatten_right->array.base); } @@ -1769,7 +1769,7 @@ static inline ArrayIndex sema_len_from_expr(Expr *expr) Type *type = type_flatten(expr->type); switch (type->type_kind) { - case TYPE_VECTOR: + case VECTORS: case TYPE_ARRAY: return (ArrayIndex)type->array.len; case TYPE_UNTYPED_LIST: @@ -1968,7 +1968,7 @@ SPLAT_NORMAL:; Type *flat = type_flatten(inner->type); switch (flat->type_kind) { - case TYPE_VECTOR: + case VECTORS: case TYPE_ARRAY: case TYPE_SLICE: case TYPE_UNTYPED_LIST: @@ -3035,7 +3035,7 @@ bool sema_expr_analyse_macro_call(SemaContext *context, Expr *call_expr, Expr *s } else if (flattened->type_kind == TYPE_VECTOR && rtype->type_kind == TYPE_INFERRED_VECTOR) { - rtype = type_get_vector(rtype->array.base, flattened->array.len); + rtype = type_get_vector(rtype->array.base, TYPE_VECTOR, flattened->array.len); inferred_len = false; } } @@ -3722,7 +3722,7 @@ RETRY:; break; case TYPE_UNTYPED_LIST: case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: { ArrayIndex len = (ArrayIndex)type->array.len; if (from_end) @@ -4265,9 +4265,9 @@ static inline bool sema_expr_analyse_pointer_offset(SemaContext *context, Expr * Expr *offset = exprptr(expr->pointer_offset_expr.offset); if (!sema_analyse_expr_rvalue(context, offset)) return false; Type *flat = type_flatten(pointer->type); - unsigned vec_len = flat->type_kind == TYPE_VECTOR ? 
flat->array.len : 0; + unsigned vec_len = type_kind_is_real_vector(flat->type_kind) ? flat->array.len : 0; - if (!cast_implicit_binary(context, offset, vec_len ? type_get_vector(type_isz, vec_len) : type_isz, NULL)) return false; + if (!cast_implicit_binary(context, offset, vec_len ? type_get_vector(type_isz, flat->type_kind, vec_len) : type_isz, NULL)) return false; // 3. Store optionality bool is_optional = IS_OPTIONAL(pointer) || IS_OPTIONAL(offset); @@ -4496,10 +4496,19 @@ static inline bool sema_slice_initializer(SemaContext *context, Expr *expr, Expr expr_rewrite_const_empty_slice(expr, new_type); return true; } - bool is_vec = initializer->type->type_kind == TYPE_VECTOR; - Type *inner_type = is_vec - ? type_get_vector(new_type->array.base, range->len_index) - : type_get_array(new_type->array.base, range->len_index); + Type *inner_type; + TypeKind kind = initializer->type->type_kind; + switch (kind) + { + case VECTORS: + inner_type = type_get_vector(new_type->array.base, kind, range->len_index); + break; + case TYPE_ARRAY: + inner_type = type_get_array(new_type->array.base, range->len_index); + break; + default: + UNREACHABLE + } const_init_set_type(initializer, inner_type); switch (initializer->kind) { @@ -4903,7 +4912,7 @@ static inline bool sema_expr_analyse_type_access(SemaContext *context, Expr *exp expr->expr_kind = EXPR_CONST; expr->resolve_status = RESOLVE_DONE; AlignSize align; - if (!sema_set_abi_alignment(context, decl->type, &align, true)) return false; + if (!sema_set_alignment(context, decl->type, &align, false)) return false; expr->const_expr = (ExprConst) { .member.decl = member, .member.align = align, @@ -5096,7 +5105,7 @@ static inline bool sema_create_const_len(Expr *expr, Type *type, Type *flat) switch (flat->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: len = flat->array.len; break; case TYPE_ENUM: @@ -5138,8 +5147,7 @@ static inline bool sema_create_const_inner(SemaContext *context, Expr *expr, Typ case 
TYPE_FLEXIBLE_ARRAY: case TYPE_SLICE: case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: - case TYPE_VECTOR: + case ALL_VECTORS: inner = type->array.base; break; default: @@ -5701,8 +5709,7 @@ static bool sema_type_property_is_valid_for_type(CanonicalType *original_type, T case TYPE_FLEXIBLE_ARRAY: case TYPE_SLICE: case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: - case TYPE_VECTOR: + case ALL_VECTORS: return true; default: return false; @@ -5723,7 +5730,7 @@ static bool sema_type_property_is_valid_for_type(CanonicalType *original_type, T switch (type->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: case TYPE_ENUM: return true; default: @@ -5866,7 +5873,7 @@ static bool sema_expr_rewrite_to_type_property(SemaContext *context, Expr *expr, case TYPE_PROPERTY_MEMBERSOF: { AlignSize align; - if (!sema_set_abi_alignment(context, parent_type, &align, true)) return false; + if (!sema_set_alignment(context, parent_type, &align, false)) return false; sema_create_const_membersof(expr, flat, align, 0); return true; } @@ -5896,7 +5903,7 @@ static bool sema_expr_rewrite_to_type_property(SemaContext *context, Expr *expr, case TYPE_PROPERTY_ALIGNOF: { AlignSize align; - if (!sema_set_abi_alignment(context, type, &align, false)) return false; + if (!sema_set_alignment(context, type, &align, false)) return false; expr_rewrite_const_int(expr, type_usz, align); return true; } @@ -6042,7 +6049,7 @@ static inline bool sema_expr_analyse_swizzle(SemaContext *context, Expr *expr, E } return true; } - Type *result = type_get_vector(indexed_type, len); + Type *result = type_get_vector(indexed_type, flat_type->type_kind, len); expr->expr_kind = EXPR_SWIZZLE; expr->swizzle_expr = (ExprSwizzle) { .parent = exprid(parent), .swizzle = kw, .is_overlapping = is_overlapping }; @@ -6206,6 +6213,7 @@ static inline bool sema_expr_analyse_access(SemaContext *context, Expr *expr, bo Type *type = type_no_optional(parent->type)->canonical; Type *flat_type = type_flatten(type); 
+ TypeKind flat_kind = flat_type->type_kind; if (kw_type == kw) { if (type_is_any_raw(flat_type)) @@ -6213,7 +6221,7 @@ static inline bool sema_expr_analyse_access(SemaContext *context, Expr *expr, bo expr_rewrite_to_builtin_access(expr, parent, ACCESS_TYPEOFANY, type_typeid); return true; } - if (flat_type->type_kind == TYPE_ANYFAULT) + if (flat_kind == TYPE_ANYFAULT) { expr_rewrite_to_builtin_access(expr, parent, ACCESS_TYPEOFANYFAULT, type_typeid); return true; @@ -6231,19 +6239,19 @@ CHECK_DEEPER: expr_rewrite_const_int(expr, type_isz, index); return true; } - if (flat_type->type_kind == TYPE_SLICE) + if (flat_kind == TYPE_SLICE) { expr_rewrite_slice_len(expr, current_parent, type_usz); return true; } - assert(flat_type->type_kind != TYPE_ARRAY && flat_type->type_kind != TYPE_VECTOR); + assert(flat_kind != TYPE_ARRAY && !type_kind_is_real_vector(flat_kind)); } - if (flat_type->type_kind == TYPE_TYPEID) + if (flat_kind == TYPE_TYPEID) { bool was_error = false; if (sema_expr_rewrite_to_typeid_property(context, expr, current_parent, kw, &was_error)) return !was_error; } - if (flat_type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(flat_kind)) { unsigned len = strlen(kw); if (len <= 4) @@ -6260,7 +6268,7 @@ CHECK_DEEPER: // Hard coded ptr on slices and any if (kw == kw_ptr) { - if (flat_type->type_kind == TYPE_SLICE) + if (flat_kind == TYPE_SLICE) { expr_rewrite_ptr_access(expr, current_parent, type_get_ptr(flat_type->array.base)); return true; @@ -6271,7 +6279,7 @@ CHECK_DEEPER: return true; } } - if (kw == kw_ordinal && flat_type->type_kind == TYPE_ENUM) + if (kw == kw_ordinal && flat_kind == TYPE_ENUM) { sema_expr_convert_enum_to_int(current_parent); expr_replace(expr, current_parent); @@ -6287,7 +6295,7 @@ CHECK_DEEPER: return true; } } - if (flat_type->type_kind == TYPE_ENUM) + if (flat_kind == TYPE_ENUM) { if (sema_cast_const(current_parent)) { @@ -6379,6 +6387,7 @@ CHECK_DEEPER: current_parent = substruct; type = current_parent->type->canonical; 
flat_type = type_flatten(type); + flat_kind = flat_type->type_kind; goto CHECK_DEEPER; } @@ -6590,7 +6599,7 @@ Expr **sema_expand_vasplat_exprs(SemaContext *context, Expr **exprs) Type *flat = type_flatten(inner->type); switch (flat->type_kind) { - case TYPE_VECTOR: + case VECTORS: case TYPE_ARRAY: case TYPE_SLICE: case TYPE_UNTYPED_LIST: @@ -6694,11 +6703,8 @@ static bool sema_expr_analyse_slice_assign(SemaContext *context, Expr *expr, Typ switch (flat->type_kind) { case TYPE_STRUCT: - case TYPE_ARRAY: - case TYPE_VECTOR: + case ALL_ARRAYLIKE: case TYPE_UNION: - case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: case TYPE_BITSTRUCT: case TYPE_SLICE: break; @@ -7171,13 +7177,13 @@ static bool sema_expr_analyse_op_assign_enum_ptr(SemaContext *context, Expr *rhs } else { - Type *real_type = type_get_vector(is_enum ? enum_inner_type(base) : type_isz, flat->array.len); + Type *real_type = type_get_vector_from_vector(is_enum ? enum_inner_type(base) : type_isz, flat); if (flat_rhs == type_untypedlist) { if (!cast_implicit(context, rhs, real_type, true)) return false; flat_rhs = type_flat_for_arithmethics(rhs->type); } - if (!type_is_integer(flat_rhs) && (flat_rhs->type_kind != TYPE_VECTOR || !type_is_integer(flat_rhs->array.base))) + if (!type_is_integer(flat_rhs) && (!type_kind_is_real_vector(flat_rhs->type_kind) || !type_is_integer(flat_rhs->array.base))) { RETURN_SEMA_ERROR(rhs, "The right side was '%s' but only integers or integer vectors are valid on the right side of %s when the left side is %s.", @@ -7289,7 +7295,7 @@ static bool sema_expr_analyse_op_assign(SemaContext *context, Expr *expr, Expr * SKIP_OVERLOAD_CHECK:; // 3. If this is only defined for ints (^= |= &= %=) verify that this is an int. Type *flat = type_flat_for_arithmethics(no_fail); - Type *base = flat->type_kind == TYPE_VECTOR ? type_flat_for_arithmethics(flat->array.base) : flat; + Type *base = type_kind_is_real_vector(flat->type_kind) ? 
type_flat_for_arithmethics(flat->array.base) : flat; if (int_only && !type_is_integer(base)) { @@ -7578,7 +7584,7 @@ INLINE bool sema_expr_analyse_ptr_sub(SemaContext *context, Expr *expr, Expr *le bool right_is_pointer_vector = type_is_pointer_vector(right_type); bool right_is_pointer = right_is_pointer_vector || right_type->type_kind == TYPE_POINTER; - Type *offset_type = vec_len ? type_get_vector(type_isz, vec_len) : type_isz; + Type *offset_type = vec_len ? type_get_vector_from_vector(type_isz, left_type) : type_isz; // 3. ptr - other pointer if (right_is_pointer) @@ -7608,7 +7614,7 @@ INLINE bool sema_expr_analyse_ptr_sub(SemaContext *context, Expr *expr, Expr *le right_type = right->type->canonical; - bool right_is_vector = right_type->type_kind == TYPE_VECTOR; + bool right_is_vector = type_kind_is_real_vector(right_type->type_kind); // 4. Check that the right hand side is an integer. if (!type_flat_is_intlike(right_type)) { @@ -7651,7 +7657,6 @@ INLINE bool sema_expr_analyse_ptr_sub(SemaContext *context, Expr *expr, Expr *le expr->expr_kind = EXPR_POINTER_OFFSET; expr->pointer_offset_expr.ptr = exprid(left); expr->pointer_offset_expr.offset = exprid(expr_negate_expr(right)); - expr->pointer_offset_expr.raw_offset = false; } expr->resolve_status = RESOLVE_NOT_DONE; @@ -7731,8 +7736,8 @@ static bool sema_expr_analyse_sub(SemaContext *context, Expr *expr, Expr *left, INLINE bool sema_expr_analyse_ptr_add(SemaContext *context, Expr *expr, Expr *left, Expr *right, CanonicalType *left_type, CanonicalType *right_type, Type *cast_to_iptr, bool *failed_ref) { - bool left_is_vec = left_type->type_kind == TYPE_VECTOR; - bool right_is_vec = right_type->type_kind == TYPE_VECTOR; + bool left_is_vec = type_kind_is_real_vector(left_type->type_kind); + bool right_is_vec = type_kind_is_real_vector(right_type->type_kind); ArraySize vec_len = left_is_vec ? left_type->array.len : 0; // 3a. Check that the other side is an integer of some sort. 
@@ -7749,7 +7754,7 @@ INLINE bool sema_expr_analyse_ptr_add(SemaContext *context, Expr *expr, Expr *le // 3b. Cast it to usz or isz depending on underlying type. // Either is fine, but it looks a bit nicer if we actually do this and keep the sign. - bool success = cast_explicit(context, right, left_is_vec ? type_get_vector(type_isz, vec_len) : type_isz); + bool success = cast_explicit(context, right, left_is_vec ? type_get_vector_from_vector(type_isz, left_type) : type_isz); // No need to check the cast we just ensured it was an integer. ASSERT_SPAN(expr, success && "This should always work"); @@ -7775,7 +7780,6 @@ INLINE bool sema_expr_analyse_ptr_add(SemaContext *context, Expr *expr, Expr *le { // Set the type and other properties. expr->type = left->type; - expr->pointer_offset_expr.raw_offset = false; expr->pointer_offset_expr.ptr = exprid(left); expr->pointer_offset_expr.offset = exprid(right); expr->expr_kind = EXPR_POINTER_OFFSET; @@ -8069,8 +8073,6 @@ static bool sema_expr_check_shift_rhs(SemaContext *context, Expr *expr, Expr *le { // Make sure the value does not exceed the bitsize of // the left hand side. We ignore this check for lhs being a constant. - - Type *base = type_vector_base(left_type_flat); ASSERT_SPAN(expr, type_kind_is_any_integer(base->type_kind)); if (int_ucomp(right->const_expr.ixx, base->builtin.bitsize, BINARYOP_GE)) @@ -8086,15 +8088,15 @@ static bool sema_expr_check_shift_rhs(SemaContext *context, Expr *expr, Expr *le } // If LHS is vector but RHS isn't? Promote. - bool lhs_is_vec = left_type_flat->type_kind == TYPE_VECTOR; - if (lhs_is_vec && right_type_flat->type_kind != TYPE_VECTOR) + bool lhs_is_vec = type_kind_is_real_vector(left_type_flat->type_kind); + if (lhs_is_vec && !type_kind_is_real_vector(right_type_flat->type_kind)) { // Create a vector from the right hand side. 
- Type *right_vec = type_get_vector(right->type, left_type_flat->array.len); + Type *right_vec = type_get_vector_from_vector(right->type, left_type_flat); if (!cast_explicit_checkable(context, right, right_vec, failed_ref)) return false; } - bool rhs_is_vec = right_type_flat->type_kind == TYPE_VECTOR; + bool rhs_is_vec = type_kind_is_real_vector(right_type_flat->type_kind); if (!lhs_is_vec && rhs_is_vec) { if (is_assign) @@ -8103,7 +8105,7 @@ static bool sema_expr_check_shift_rhs(SemaContext *context, Expr *expr, Expr *le RETURN_SEMA_ERROR(right, "The shift cannot be a vector of type %s when shifting a variable of type %s.", left->type, right->type); } - Type *left_vec = type_get_vector(left->type, right_type_flat->array.len); + Type *left_vec = type_get_vector_from_vector(left->type, right_type_flat); if (!cast_explicit_checkable(context, left, left_vec, failed_ref)) return false; } @@ -8112,11 +8114,11 @@ static bool sema_expr_check_shift_rhs(SemaContext *context, Expr *expr, Expr *le if (type_no_optional(left->type)->canonical == right_type) return true; Type *base = right_type; - if (right_type->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(right_type->type_kind)) { base = right_type->array.base->canonical; base = type_flat_distinct_enum_inline(base); - right_type = type_get_vector(base, right_type->array.len); + right_type = type_get_vector_from_vector(base, right_type); } else { @@ -8155,7 +8157,7 @@ static bool sema_expr_analyse_shift(SemaContext *context, Expr *expr, Expr *left Type *flat_left = type_flatten_and_inline(left->type); Type *flat_right = type_flatten_and_inline(right->type); - if (flat_left->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(flat_left->type_kind)) { Type *left_base = type_flatten_and_inline(flat_left->array.base); if (!type_is_integer(left_base)) goto FAIL; @@ -8342,7 +8344,7 @@ BoolErr sema_type_can_check_equality_with_overload(SemaContext *context, Type *t case TYPE_FUNC_PTR: case TYPE_FUNC_RAW: case 
TYPE_TYPEINFO: - case TYPE_VECTOR: + case VECTORS: case TYPE_WILDCARD: return true; } @@ -8559,7 +8561,7 @@ NEXT: max = max->canonical; - if (max->type_kind == TYPE_VECTOR && !is_equality_type_op) + if (type_kind_is_real_vector(max->type_kind) && !is_equality_type_op) { RETURN_SEMA_ERROR(expr, "Vector types can only be tested for equality, for other comparison, use vector comparison functions."); } @@ -9061,8 +9063,8 @@ static inline bool sema_expr_analyse_not(SemaContext *context, Expr *expr) Type *canonical = type->canonical; switch (canonical->type_kind) { - case TYPE_VECTOR: - expr->type = type_get_vector(type_bool, canonical->array.len); + case VECTORS: + expr->type = type_get_vector_from_vector(type_bool, canonical); return true; case TYPE_INFERRED_VECTOR: UNREACHABLE @@ -9273,7 +9275,7 @@ static inline bool sema_expr_analyse_incdec(SemaContext *context, Expr *expr) Type *type = type_flatten(inner->type); // 5. We can only inc/dec numbers or pointers. - if (!type_underlying_may_add_sub(type) && type->type_kind != TYPE_VECTOR) + if (!type_underlying_may_add_sub(type) && !type_kind_is_real_vector(type->type_kind)) { RETURN_SEMA_ERROR(inner, "The expression must be a vector, enum, number or a pointer."); } @@ -10074,7 +10076,7 @@ static inline bool sema_expr_analyse_ct_alignof(SemaContext *context, Expr *expr } else { - if (!sema_set_abi_alignment(context, type, &align, false)) return false; + if (!sema_set_alignment(context, type, &align, false)) return false; } FOREACH_IDX(i, DesignatorElement *, element, path) { @@ -10265,7 +10267,7 @@ RETRY: type_quoted_error_string(type)); return poisoned_type; } - return type_get_vector(type, size); + return type_get_vector(type, type_info->is_simd ? 
TYPE_SIMD_VECTOR : TYPE_VECTOR, size); } case TYPE_INFO_ARRAY: { @@ -10742,7 +10744,7 @@ static bool sema_expr_analyse_lenof(SemaContext *context, Expr *expr, bool *miss switch (canonical->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: expr_rewrite_const_int(expr, type_isz, canonical->array.len); return true; case TYPE_SLICE: diff --git a/src/compiler/sema_initializers.c b/src/compiler/sema_initializers.c index 6f7d27211..6cc4cb1a1 100644 --- a/src/compiler/sema_initializers.c +++ b/src/compiler/sema_initializers.c @@ -367,7 +367,7 @@ static inline bool sema_expr_analyse_array_plain_initializer(SemaContext *contex if (!sema_analyse_inferred_expr(context, inner_type, element, no_match_ref)) return false; Type *element_type = element->type; Type *element_flat = type_flatten(element_type); - if (element_flat->type_kind == TYPE_VECTOR + if (type_kind_is_real_vector(element_flat->type_kind) && type_flatten(type_get_indexed_type(element_type)) == type_flatten(inner_type)) { unsigned len = element_flat->array.len; @@ -666,10 +666,9 @@ static inline bool sema_expr_analyse_initializer(SemaContext *context, Type *ass if (flattened->type_kind == TYPE_UNTYPED_LIST || flattened->type_kind == TYPE_ARRAY || flattened->type_kind == TYPE_INFERRED_ARRAY || - flattened->type_kind == TYPE_INFERRED_VECTOR || flattened->type_kind == TYPE_FLEXIBLE_ARRAY || flattened->type_kind == TYPE_SLICE || - flattened->type_kind == TYPE_VECTOR) + type_kind_is_any_vector(flattened->type_kind)) { return sema_expr_analyse_array_plain_initializer(context, assigned_type, flattened, expr, no_match_ref); } @@ -871,12 +870,8 @@ bool sema_expr_analyse_initializer_list(SemaContext *context, Type *to, Expr *ex case TYPE_UNTYPED_LIST: case TYPE_STRUCT: case TYPE_UNION: - case TYPE_ARRAY: case TYPE_BITSTRUCT: - case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: - case TYPE_FLEXIBLE_ARRAY: - case TYPE_VECTOR: + case ALL_ARRAYLIKE: return sema_expr_analyse_initializer(context, to, flattened, 
expr, no_match_ref); case TYPE_SLICE: { @@ -1257,7 +1252,7 @@ static inline void sema_update_const_initializer_with_designator( sema_update_const_initializer_with_designator_union(const_init, curr, end, value); return; case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: sema_update_const_initializer_with_designator_array(const_init, curr, end, value); return; default: @@ -1309,7 +1304,7 @@ static Type *sema_find_type_of_element(SemaContext *context, Type *type, Designa base = type_flattened->array.base; break; case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: len = type_flattened->array.len; base = type_flattened->array.base; break; diff --git a/src/compiler/sema_internal.h b/src/compiler/sema_internal.h index fb200492c..36fe9bcd8 100644 --- a/src/compiler/sema_internal.h +++ b/src/compiler/sema_internal.h @@ -130,7 +130,7 @@ Decl *sema_analyse_parameterized_identifier(SemaContext *c, Path *decl_path, con Expr **params, bool *was_recursive_ref, SourceSpan invocation_span); bool sema_parameterized_type_is_found(SemaContext *context, Path *decl_path, const char *name, SourceSpan span); Type *sema_resolve_type_get_func(Signature *signature, CallABI abi); -INLINE bool sema_set_abi_alignment(SemaContext *context, Type *type, AlignSize *result, bool as_member); +INLINE bool sema_set_alignment(SemaContext *context, Type *type, AlignSize *result, bool is_alloca); INLINE bool sema_set_alloca_alignment(SemaContext *context, Type *type, AlignSize *result); INLINE void sema_display_deprecated_warning_on_use(SemaContext *context, Decl *decl, SourceSpan span); bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr *left, Expr *right, bool *failed_ref); @@ -170,7 +170,7 @@ INLINE bool sema_check_left_right_const(SemaContext *context, Expr *left, Expr * return true; } -INLINE bool sema_set_abi_alignment(SemaContext *context, Type *type, AlignSize *result, bool as_member) +INLINE bool sema_set_alignment(SemaContext *context, Type *type, AlignSize 
*result, bool is_alloca) { type = type->canonical; if (type_is_func_ptr(type)) @@ -179,20 +179,7 @@ INLINE bool sema_set_abi_alignment(SemaContext *context, Type *type, AlignSize * return true; } if (!sema_resolve_type_decl(context, type)) return false; - if (as_member) - { - while (type->type_kind == TYPE_TYPEDEF) - { - if (type_is_simd(type)) goto DONE; - type = type->decl->distinct->type->canonical; - } - if (type_kind_is_any_vector(type->type_kind)) - { - type = type->array.base; - } - } -DONE:; - *result = type_abi_alignment(type); + *result = is_alloca ? type_alloca_alignment(type) : type_abi_alignment(type); return true; } diff --git a/src/compiler/sema_name_resolution.c b/src/compiler/sema_name_resolution.c index 1516b3c22..73e66a1ae 100644 --- a/src/compiler/sema_name_resolution.c +++ b/src/compiler/sema_name_resolution.c @@ -965,14 +965,13 @@ bool sema_resolve_type_decl(SemaContext *context, Type *type) case TYPE_BOOL: case ALL_INTS: case ALL_FLOATS: + case ALL_VECTORS: case TYPE_ANYFAULT: case TYPE_TYPEID: case TYPE_POINTER: case TYPE_FUNC_PTR: case TYPE_UNTYPED_LIST: case TYPE_MEMBER: - case TYPE_INFERRED_VECTOR: - case TYPE_VECTOR: case TYPE_SLICE: case TYPE_ANY: case TYPE_INTERFACE: @@ -1014,7 +1013,7 @@ Decl *sema_resolve_type_method(SemaContext *context, CanonicalType *type, const { case TYPE_ARRAY: return declptrzero(methodtable_get(&compiler.context.method_extensions, type_get_inferred_array(type->array.base), method_name)); - case TYPE_VECTOR: + case VECTORS: return declptrzero(methodtable_get(&compiler.context.method_extensions, type_get_inferred_vector(type->array.base), method_name)); default: return NULL; diff --git a/src/compiler/sema_stmts.c b/src/compiler/sema_stmts.c index 573da66f4..e2c752220 100644 --- a/src/compiler/sema_stmts.c +++ b/src/compiler/sema_stmts.c @@ -1738,7 +1738,7 @@ SKIP_OVERLOAD:; switch (enumerator_type->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: array_len = enumerator_type->array.len; len_call 
= NULL; break; @@ -2850,7 +2850,7 @@ static inline bool sema_analyse_ct_foreach_stmt(SemaContext *context, Ast *state INITIALIZER:; ConstInitType init_type = initializer->kind; const_list_type = type_flatten(collection->type); - if (const_list_type->type_kind == TYPE_ARRAY || const_list_type->type_kind == TYPE_VECTOR) + if (const_list_type->type_kind == TYPE_ARRAY || type_kind_is_real_vector(const_list_type->type_kind)) { count = const_list_type->array.len; } diff --git a/src/compiler/sema_types.c b/src/compiler/sema_types.c index 4aad62188..f03343139 100644 --- a/src/compiler/sema_types.c +++ b/src/compiler/sema_types.c @@ -149,7 +149,8 @@ static inline bool sema_check_array_type(SemaContext *context, TypeInfo *origina { RETURN_SEMA_ERROR(original_info, "You cannot form a vector with elements of type %s.", type_quoted_error_string(base)); } - *result_ref = type_get_vector(base, len); + if (original_info->is_simd && !is_power_of_two(len)) RETURN_SEMA_ERROR(original_info, "The length of a @simd vector must be a power of two."); + *result_ref = type_get_vector(base, original_info->is_simd ? 
TYPE_SIMD_VECTOR : TYPE_VECTOR, len); break; case TYPE_INFO_ARRAY: if (!type_is_valid_for_array(base)) diff --git a/src/compiler/types.c b/src/compiler/types.c index 645d17f11..50b30aa7e 100644 --- a/src/compiler/types.c +++ b/src/compiler/types.c @@ -179,12 +179,11 @@ void type_append_name_to_scratch(Type *type) case TYPE_TYPEID: case TYPE_ANYFAULT: case TYPE_ANY: - case TYPE_VECTOR: + case ALL_VECTORS: scratch_buffer_append(type->name); break; case TYPE_UNTYPED_LIST: case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: case TYPE_TYPEINFO: case TYPE_MEMBER: case TYPE_WILDCARD: @@ -307,6 +306,8 @@ const char *type_to_error_string(Type *type) return str_printf("%s[<*>]", type_to_error_string(type->array.base)); case TYPE_VECTOR: return str_printf("%s[<%llu>]", type_to_error_string(type->array.base), (unsigned long long)type->array.len); + case TYPE_SIMD_VECTOR: + return str_printf("%s[<%llu>] @simd", type_to_error_string(type->array.base), (unsigned long long)type->array.len); case TYPE_TYPEINFO: return "typeinfo"; case TYPE_TYPEID: @@ -385,6 +386,8 @@ static const char *type_to_error_string_with_path(Type *type) return str_printf("%s[<*>]", type_to_error_string_with_path(type->array.base)); case TYPE_VECTOR: return str_printf("%s[<%llu>]", type_to_error_string_with_path(type->array.base), (unsigned long long)type->array.len); + case TYPE_SIMD_VECTOR: + return str_printf("%s[<%llu>] @simd", type_to_error_string_with_path(type->array.base), (unsigned long long)type->array.len); case TYPE_TYPEINFO: return "typeinfo"; case TYPE_TYPEID: @@ -464,8 +467,8 @@ TypeSize type_size(Type *type) case TYPE_FUNC_PTR: case TYPE_POINTER: return type->size = t.iptr.canonical->builtin.bytesize; + case VECTORS: case TYPE_ARRAY: - case TYPE_VECTOR: return type->size = type_size(type->array.base) * type->array.len; case TYPE_SLICE: return type->size = size_slice; @@ -487,7 +490,7 @@ FunctionPrototype *type_get_resolved_prototype(Type *type) bool type_flat_is_numlike(Type *type) { type 
= type_flatten(type); - if (type->type_kind == TYPE_VECTOR) type = type_flatten(type->array.base); + if (type_kind_is_real_vector(type->type_kind)) type = type_flatten(type->array.base); TypeKind kind = type->type_kind; return kind >= TYPE_NUM_FIRST && kind <= TYPE_NUM_LAST; } @@ -495,7 +498,7 @@ bool type_flat_is_numlike(Type *type) bool type_flat_is_floatlike(Type *type) { type = type_flatten(type); - if (type->type_kind == TYPE_VECTOR) type = type_flatten(type->array.base); + if (type_kind_is_real_vector(type->type_kind)) type = type_flatten(type->array.base); TypeKind kind = type->type_kind; return kind >= TYPE_FLOAT_FIRST && kind <= TYPE_FLOAT_LAST; } @@ -503,7 +506,7 @@ bool type_flat_is_floatlike(Type *type) bool type_flat_is_intlike(Type *type) { type = type_flatten(type); - if (type->type_kind == TYPE_VECTOR) type = type_flatten(type->array.base); + if (type_kind_is_real_vector(type->type_kind)) type = type_flatten(type->array.base); TypeKind kind = type->type_kind; return kind >= TYPE_INTEGER_FIRST && kind <= TYPE_INTEGER_LAST; } @@ -511,7 +514,7 @@ bool type_flat_is_intlike(Type *type) bool type_flat_is_boolintlike(Type *type) { type = type_flatten(type); - if (type->type_kind == TYPE_VECTOR) type = type_flatten(type->array.base); + if (type_kind_is_real_vector(type->type_kind)) type = type_flatten(type->array.base); TypeKind kind = type->type_kind; return kind == TYPE_BOOL || (kind >= TYPE_INTEGER_FIRST && kind <= TYPE_INTEGER_LAST); } @@ -528,16 +531,6 @@ bool type_is_abi_aggregate(Type *type) return type_is_aggregate(type); } -bool type_is_simd(Type *type) -{ - type = type->canonical; - while (type->type_kind == TYPE_TYPEDEF) - { - if (type->decl->attr_simd) return true; - type = type->decl->distinct->type; - } - return false; -} bool type_is_aggregate(Type *type) { @@ -567,7 +560,7 @@ bool type_is_aggregate(Type *type) case TYPE_CONST_ENUM: case TYPE_FUNC_PTR: case TYPE_FUNC_RAW: - case TYPE_VECTOR: + case VECTORS: case TYPE_ANYFAULT: return false; 
case TYPE_STRUCT: @@ -667,7 +660,7 @@ bool type_is_comparable(Type *type) case TYPE_FUNC_PTR: case TYPE_FUNC_RAW: case TYPE_TYPEINFO: - case TYPE_VECTOR: + case VECTORS: case TYPE_WILDCARD: return true; } @@ -707,6 +700,9 @@ void type_mangle_introspect_name_to_buffer(Type *type) scratch_buffer_append("f$"); type_mangle_introspect_name_to_buffer(type->optional); return; + case TYPE_SIMD_VECTOR: + scratch_buffer_append("si"); + FALLTHROUGH; case TYPE_VECTOR: scratch_buffer_append_char('v'); scratch_buffer_append_unsigned_int(type->array.len); @@ -790,17 +786,8 @@ bool type_func_match(Type *fn_type, Type *rtype, unsigned arg_count, ...) return true; } -AlignSize type_simd_alignment(CanonicalType *type) -{ - ASSERT(type->type_kind == TYPE_VECTOR); - ByteSize width = type_size(type->array.base) * type->array.len; - AlignSize alignment = (AlignSize)(int32_t)width; - if (max_alignment_vector && alignment > max_alignment_vector) return max_alignment_vector; - ASSERT(is_power_of_two(alignment)); - return alignment; -} -AlignSize type_abi_alignment(Type *type) +INLINE AlignSize type_alignment_(Type *type, bool alloca) { RETRY: switch (type->type_kind) @@ -817,6 +804,13 @@ AlignSize type_abi_alignment(Type *type) goto RETRY; case TYPE_INFERRED_VECTOR: case TYPE_VECTOR: + if (!alloca) + { + type = type->array.base->canonical; + goto RETRY; + } + FALLTHROUGH; + case TYPE_SIMD_VECTOR: { ArraySize len = type->array.len; if (!len) len = 1; @@ -876,6 +870,11 @@ AlignSize type_abi_alignment(Type *type) UNREACHABLE } +AlignSize type_abi_alignment(Type *type) +{ + return type_alignment_(type, false); +} + static inline void create_type_cache(Type *type) { ASSERT(type->type_cache == NULL); @@ -1096,7 +1095,7 @@ Type *type_get_inferred_vector(Type *arr_type) AlignSize type_alloca_alignment(Type *type) { - AlignSize align = type_abi_alignment(type); + AlignSize align = type_alignment_(type, true); if (align < 16 && (compiler.platform.abi == ABI_X64 || compiler.platform.abi == ABI_WIN64)) 
{ type = type_flatten(type); @@ -1216,7 +1215,7 @@ Type *type_get_indexed_type(Type *type) case TYPE_INFERRED_ARRAY: case TYPE_INFERRED_VECTOR: case TYPE_FLEXIBLE_ARRAY: - case TYPE_VECTOR: + case VECTORS: return type->array.base; case TYPE_CONST_ENUM: type = enum_inner_type(type); @@ -1250,17 +1249,25 @@ static Type *type_create_array(Type *element_type, ArraySize len, TypeKind kind, if (ptr_vec->array.len == len) return ptr_vec; } Type *vec_arr; - if (kind == TYPE_ARRAY) + switch (kind) { - vec_arr = type_new(TYPE_ARRAY, str_printf("%s[%llu]", element_type->name, (unsigned long long)len)); - vec_arr->array.base = element_type; - vec_arr->array.len = len; - } - else - { - vec_arr = type_new(kind, str_printf("%s[<%llu>]", element_type->name, (unsigned long long)len)); - vec_arr->array.base = element_type; - vec_arr->array.len = len; + case TYPE_ARRAY: + vec_arr = type_new(TYPE_ARRAY, str_printf("%s[%llu]", element_type->name, (unsigned long long)len)); + vec_arr->array.base = element_type; + vec_arr->array.len = len; + break; + case TYPE_VECTOR: + vec_arr = type_new(kind, str_printf("%s[<%llu>]", element_type->name, (unsigned long long)len)); + vec_arr->array.base = element_type; + vec_arr->array.len = len; + break; + case TYPE_SIMD_VECTOR: + vec_arr = type_new(kind, str_printf("%s[<%llu>] @simd", element_type->name, (unsigned long long)len)); + vec_arr->array.base = element_type; + vec_arr->array.len = len; + break; + default: + UNREACHABLE; } if (element_type->canonical == element_type) { @@ -1276,14 +1283,14 @@ static Type *type_create_array(Type *element_type, ArraySize len, TypeKind kind, Type *type_array_from_vector(Type *vec_type) { - ASSERT(vec_type->type_kind == TYPE_VECTOR); + ASSERT(type_kind_is_real_vector(vec_type->type_kind)); return type_get_array(vec_type->array.base, vec_type->array.len); } Type *type_vector_from_array(Type *vec_type) { ASSERT(vec_type->type_kind == TYPE_ARRAY); - return type_get_vector(vec_type->array.base, vec_type->array.len); + 
return type_get_vector(vec_type->array.base, TYPE_VECTOR, vec_type->array.len);
 }
 
 Type *type_get_array(Type *arr_type, ArraySize len)
@@ -1344,7 +1351,7 @@ bool type_is_valid_for_array(Type *type)
 		case TYPE_BOOL:
 		case TYPE_ARRAY:
 		case TYPE_SLICE:
-		case TYPE_VECTOR:
+		case VECTORS:
 			return true;
 		case TYPE_ALIAS:
 			ASSERT(type->decl->resolve_status == RESOLVE_DONE);
@@ -1367,19 +1374,30 @@
 	UNREACHABLE
 }
 
-Type *type_get_vector_bool(Type *original_type)
+Type *type_get_vector_bool(Type *original_type, TypeKind kind)
 {
 	Type *type = type_flatten(original_type);
 	ByteSize size = type_size(type->array.base);
-	return type_get_vector(type_int_signed_by_bitsize((unsigned)size * 8), (unsigned)original_type->array.len);
+	return type_get_vector(type_int_signed_by_bitsize((unsigned)size * 8), kind, (unsigned)original_type->array.len);
 }
 
-Type *type_get_vector(Type *vector_type, unsigned len)
+Type *type_get_vector_from_vector(Type *base_type, Type *original_vector)
 {
-	ASSERT(type_is_valid_for_vector(vector_type));
-	return type_create_array(vector_type, len, TYPE_VECTOR, false);
+	ASSERT(type_kind_is_real_vector(original_vector->type_kind));
+	return type_get_vector(base_type, original_vector->type_kind, original_vector->array.len);
 }
 
+Type *type_get_simd_from_vector(Type *original_vector)
+{
+	ASSERT(original_vector->type_kind == TYPE_VECTOR);
+	return type_get_vector(original_vector->array.base, TYPE_SIMD_VECTOR, original_vector->array.len);
+}
+
+Type *type_get_vector(Type *vector_type, TypeKind kind, unsigned len)
+{
+	ASSERT(type_kind_is_real_vector(kind) && type_is_valid_for_vector(vector_type));
+	return type_create_array(vector_type, len, kind, false);
+}
 
 static void type_create(const char *name, Type *location, TypeKind kind, unsigned bitsize,
                         unsigned align, unsigned pref_align)
@@ -1593,7 +1611,7 @@ bool type_is_scalar(Type *type)
 		case TYPE_UNION:
 		case TYPE_ARRAY:
 		case TYPE_SLICE:
-		case TYPE_VECTOR:
+		case VECTORS:
 		case TYPE_INTERFACE:
case TYPE_ANY: case TYPE_FLEXIBLE_ARRAY: @@ -1735,15 +1753,15 @@ static TypeCmpResult type_array_is_equivalent(SemaContext *context, Type *from, return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit); case TYPE_ARRAY: if (to_kind != TYPE_ARRAY && to_kind != TYPE_INFERRED_ARRAY) return TYPE_MISMATCH; - if (to->type_kind == TYPE_ARRAY && from->array.len != to->array.len) return TYPE_MISMATCH; + if (to_kind == TYPE_ARRAY && from->array.len != to->array.len) return TYPE_MISMATCH; return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit); case TYPE_INFERRED_VECTOR: ASSERT(to_kind != TYPE_INFERRED_VECTOR); - if (to->type_kind != TYPE_VECTOR) return TYPE_MISMATCH; + if (type_kind_is_real_vector(to_kind)) return TYPE_MISMATCH; return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit); - case TYPE_VECTOR: - if (to_kind != TYPE_VECTOR && to_kind != TYPE_INFERRED_VECTOR) return TYPE_MISMATCH; - if (to->type_kind == TYPE_VECTOR && from->array.len != to->array.len) return TYPE_MISMATCH; + case VECTORS: + if (!type_kind_is_any_vector(to_kind)) return TYPE_MISMATCH; + if (type_kind_is_real_vector(to_kind) && from->array.len != to->array.len) return TYPE_MISMATCH; return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit); default: return TYPE_MISMATCH; @@ -1782,7 +1800,7 @@ TypeCmpResult type_array_element_is_equivalent(SemaContext *context, Type *eleme case TYPE_STRUCT: if (is_explicit) return type_is_structurally_equivalent(element1, element2) ? 
TYPE_SAME : TYPE_MISMATCH; return TYPE_MISMATCH; - case TYPE_VECTOR: + case VECTORS: case TYPE_ARRAY: case TYPE_INFERRED_ARRAY: case TYPE_INFERRED_VECTOR: @@ -1879,10 +1897,9 @@ bool type_may_have_method(Type *type) case TYPE_TYPEID: case TYPE_ARRAY: case TYPE_SLICE: + case ALL_VECTORS: case TYPE_INFERRED_ARRAY: - case TYPE_INFERRED_VECTOR: case TYPE_FLEXIBLE_ARRAY: - case TYPE_VECTOR: case TYPE_BOOL: case TYPE_INTERFACE: return true; @@ -1930,8 +1947,7 @@ Type *type_find_max_num_type(Type *num_type, Type *other_num) ASSERT(kind != other_kind); // If the other is a vector then we always set that one as the max. - if (other_kind == TYPE_VECTOR) return other_num; - + if (type_kind_is_real_vector(other_kind)) return other_num; // 1. The only conversions need to happen if the other type is a number. if (other_kind < TYPE_INTEGER_FIRST || other_kind > TYPE_FLOAT_LAST) return NULL; @@ -2040,7 +2056,7 @@ Type *type_decay_array_pointer(Type *type) switch (ptr->type_kind) { case TYPE_ARRAY: - case TYPE_VECTOR: + case VECTORS: return type_get_ptr(ptr->array.base->canonical); default: return type; @@ -2187,7 +2203,7 @@ RETRY_DISTINCT: } } } - if (type->pointer->type_kind == TYPE_VECTOR) + if (type_kind_is_real_vector(type->pointer->type_kind)) { Type *vector_base = type->pointer->array.base->canonical; if (other->type_kind == TYPE_SLICE && vector_base == other->array.base->canonical) @@ -2252,7 +2268,7 @@ RETRY_DISTINCT: UNREACHABLE // Should only handle canonical types case TYPE_UNTYPED_LIST: if (other->type_kind == TYPE_ARRAY) return other; - if (other->type_kind == TYPE_VECTOR) return other; + if (type_kind_is_real_vector(other->type_kind)) return other; if (other->type_kind == TYPE_STRUCT) return other; if (other->type_kind == TYPE_SLICE) return other; return NULL; @@ -2283,6 +2299,16 @@ RETRY_DISTINCT: return NULL; UNREACHABLE case TYPE_VECTOR: + // VECTOR + SIMD -> SIMD if type and length matches. 
+ if (other->type_kind == TYPE_SIMD_VECTOR) + { + if (other->array.base->canonical == type->array.base->canonical && other->array.len == type->array.len) + { + return other; + } + } + return NULL; + case TYPE_SIMD_VECTOR: // No implicit conversion between vectors return NULL; } @@ -2351,8 +2377,7 @@ unsigned type_get_introspection_kind(TypeKind kind) return INTROSPECT_TYPE_ARRAY; case TYPE_SLICE: return INTROSPECT_TYPE_SLICE; - case TYPE_VECTOR: - case TYPE_INFERRED_VECTOR: + case ALL_VECTORS: return INTROSPECT_TYPE_VECTOR; case TYPE_OPTIONAL: return INTROSPECT_TYPE_OPTIONAL; @@ -2400,12 +2425,8 @@ Module *type_base_module(Type *type) case TYPE_ALIAS: type = type->canonical; goto RETRY; - case TYPE_ARRAY: case TYPE_SLICE: - case TYPE_INFERRED_ARRAY: - case TYPE_FLEXIBLE_ARRAY: - case TYPE_VECTOR: - case TYPE_INFERRED_VECTOR: + case ALL_ARRAYLIKE: type = type->array.base; goto RETRY; case TYPE_OPTIONAL: diff --git a/test/test_suite/abi/vec_update_align.c3t b/test/test_suite/abi/vec_update_align.c3t index a1789dd1f..e66453d00 100644 --- a/test/test_suite/abi/vec_update_align.c3t +++ b/test/test_suite/abi/vec_update_align.c3t @@ -47,7 +47,7 @@ entry: %x1 = alloca float, align 4 call void @llvm.memset.p0.i64(ptr align 4 %a, i8 0, i64 52, i1 false) call void @llvm.memset.p0.i64(ptr align 4 %b, i8 0, i64 52, i1 false) - %ptradd = getelementptr inbounds i8, ptr %b, i64 16 + %ptradd = getelementptr inbounds i8, ptr %b, i64 12 %0 = load <3 x float>, ptr %ptradd, align 4 %1 = extractelement <3 x float> %0, i64 0 %fincdec = fadd float %1, 1.000000e+00 diff --git a/test/test_suite/abi/x64alignarray.c3t b/test/test_suite/abi/x64alignarray.c3t index 31f5d990b..af2c18ff4 100644 --- a/test/test_suite/abi/x64alignarray.c3t +++ b/test/test_suite/abi/x64alignarray.c3t @@ -14,7 +14,7 @@ fn void test1_g() define void @test.test1_g() #0 { entry: %x = alloca [4 x float], align 16 - store float 0.000000e+00, ptr %x, align 4 + store float 0.000000e+00, ptr %x, align 16 %ptradd = 
getelementptr inbounds i8, ptr %x, i64 4 store float 0.000000e+00, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %x, i64 8 diff --git a/test/test_suite/any/interface_optional_try.c3t b/test/test_suite/any/interface_optional_try.c3t index dfd148887..090cca8d6 100644 --- a/test/test_suite/any/interface_optional_try.c3t +++ b/test/test_suite/any/interface_optional_try.c3t @@ -69,34 +69,34 @@ after_check2: ; preds = %after_assign %3 = load %any, ptr %v, align 8 %4 = extractvalue %any %3, 1 %5 = inttoptr i64 %4 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %6 = icmp eq ptr %5, %type - br i1 %6, label %cache_hit, label %cache_miss + %6 = load ptr, ptr %.cachedtype, align 8 + %7 = icmp eq ptr %5, %6 + br i1 %7, label %cache_hit, label %cache_miss cache_miss: ; preds = %after_check2 - %7 = call ptr @.dyn_search(ptr %5, ptr @"$sel.do_something") - store ptr %7, ptr %.inlinecache, align 8 + %8 = call ptr @.dyn_search(ptr %5, ptr @"$sel.do_something") + store ptr %8, ptr %.inlinecache, align 8 store ptr %5, ptr %.cachedtype, align 8 - br label %8 + br label %10 cache_hit: ; preds = %after_check2 - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %8 + %9 = load ptr, ptr %.inlinecache, align 8 + br label %10 -8: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %7, %cache_miss ] +10: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %9, %cache_hit ], [ %8, %cache_miss ] store ptr %fn_phi, ptr %x, align 8 br label %phi_try_catch catch_landing: ; preds = %after_assign br label %phi_try_catch -phi_try_catch: ; preds = %catch_landing, %8 - %val = phi i1 [ true, %8 ], [ false, %catch_landing ] +phi_try_catch: ; preds = %catch_landing, %10 + %val = phi i1 [ true, %10 ], [ false, %catch_landing ] br i1 %val, label %if.exit, label %if.else if.else: ; preds = %phi_try_catch - %9 = call i64 @std.io.printfn(ptr %retparam3, ptr @.str.1, i64 39, ptr null, i64 0) + %11 = call i64 @std.io.printfn(ptr 
%retparam3, ptr @.str.1, i64 39, ptr null, i64 0) br label %if.exit if.exit: ; preds = %if.else, %phi_try_catch diff --git a/test/test_suite/arrays/array_comparison_2.c3t b/test/test_suite/arrays/array_comparison_2.c3t index 23370b44a..cf8eb6eaf 100644 --- a/test/test_suite/arrays/array_comparison_2.c3t +++ b/test/test_suite/arrays/array_comparison_2.c3t @@ -21,14 +21,14 @@ entry: %z = alloca [8 x [2 x [2 x i32]]], align 16 %w = alloca [8 x [2 x [2 x i32]]], align 16 %cmp.idx = alloca i64, align 8 - store i32 0, ptr %x, align 4 + store i32 0, ptr %x, align 16 %ptradd = getelementptr inbounds i8, ptr %x, i64 4 store i32 0, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %x, i64 8 store i32 0, ptr %ptradd1, align 4 %ptradd2 = getelementptr inbounds i8, ptr %ptradd1, i64 4 store i32 0, ptr %ptradd2, align 4 - store i32 0, ptr %y, align 4 + store i32 0, ptr %y, align 16 %ptradd3 = getelementptr inbounds i8, ptr %y, i64 4 store i32 0, ptr %ptradd3, align 4 %ptradd4 = getelementptr inbounds i8, ptr %y, i64 8 @@ -38,14 +38,17 @@ entry: %cmp = call i32 @memcmp(ptr %x, ptr %y, i64 8) %eq = icmp eq i32 %cmp, 0 br i1 %eq, label %next_check, label %exit + next_check: ; preds = %entry %ptradd6 = getelementptr inbounds i8, ptr %x, i64 8 %ptradd7 = getelementptr inbounds i8, ptr %y, i64 8 %cmp8 = call i32 @memcmp(ptr %ptradd6, ptr %ptradd7, i64 8) %eq9 = icmp eq i32 %cmp8, 0 br i1 %eq9, label %match10, label %exit + match10: ; preds = %next_check br label %exit + exit: ; preds = %match10, %next_check, %entry %array_cmp_phi = phi i1 [ false, %entry ], [ false, %next_check ], [ true, %match10 ] %0 = zext i1 %array_cmp_phi to i8 @@ -54,6 +57,7 @@ exit: ; preds = %match10, %next_chec call void @llvm.memset.p0.i64(ptr align 16 %w, i8 0, i64 128, i1 false) store i64 0, ptr %cmp.idx, align 8 br label %array_loop_start + array_loop_start: ; preds = %array_loop_comparison, %exit %1 = load i64, ptr %cmp.idx, align 8 %ptroffset = getelementptr inbounds [16 x i8], ptr %z, i64 
%1 @@ -61,22 +65,27 @@ array_loop_start: ; preds = %array_loop_comparis %cmp12 = call i32 @memcmp(ptr %ptroffset, ptr %ptroffset11, i64 8) %eq13 = icmp eq i32 %cmp12, 0 br i1 %eq13, label %next_check14, label %exit20 + next_check14: ; preds = %array_loop_start %ptradd15 = getelementptr inbounds i8, ptr %ptroffset, i64 8 %ptradd16 = getelementptr inbounds i8, ptr %ptroffset11, i64 8 %cmp17 = call i32 @memcmp(ptr %ptradd15, ptr %ptradd16, i64 8) %eq18 = icmp eq i32 %cmp17, 0 br i1 %eq18, label %match19, label %exit20 + match19: ; preds = %next_check14 br label %exit20 + exit20: ; preds = %match19, %next_check14, %array_loop_start %array_cmp_phi21 = phi i1 [ false, %array_loop_start ], [ false, %next_check14 ], [ true, %match19 ] br i1 %array_cmp_phi21, label %array_loop_comparison, label %array_cmp_exit + array_loop_comparison: ; preds = %exit20 %inc = add i64 %1, 1 store i64 %inc, ptr %cmp.idx, align 8 %lt = icmp ult i64 %inc, 8 br i1 %lt, label %array_loop_start, label %array_cmp_exit + array_cmp_exit: ; preds = %array_loop_comparison, %exit20 %array_cmp_phi22 = phi i1 [ true, %array_loop_comparison ], [ false, %exit20 ] %2 = zext i1 %array_cmp_phi22 to i8 diff --git a/test/test_suite/arrays/codegen_big_array_range.c3t b/test/test_suite/arrays/codegen_big_array_range.c3t index 229f6549c..0e6a1679a 100644 --- a/test/test_suite/arrays/codegen_big_array_range.c3t +++ b/test/test_suite/arrays/codegen_big_array_range.c3t @@ -22,7 +22,7 @@ entry: %e = alloca ptr, align 8 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 16 @.__const, i32 4000, i1 false) call void @llvm.memset.p0.i64(ptr align 16 %y, i8 0, i64 40, i1 false) - store i32 10, ptr %y, align 4 + store i32 10, ptr %y, align 16 %ptradd = getelementptr inbounds i8, ptr %y, i64 4 store i32 10, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %y, i64 8 diff --git a/test/test_suite/arrays/index_into_global.c3t b/test/test_suite/arrays/index_into_global.c3t index 13ef5c884..c4573a71c 100644 
--- a/test/test_suite/arrays/index_into_global.c3t +++ b/test/test_suite/arrays/index_into_global.c3t @@ -27,6 +27,6 @@ entry: %sext = sext i32 %0 to i64 %ptroffset = getelementptr inbounds [4 x i8], ptr @foo.bar, i64 %sext store i32 0, ptr %ptroffset, align 4 - store i32 %0, ptr @foo.bar, align 4 + store i32 %0, ptr @foo.bar, align 16 ret void } diff --git a/test/test_suite/bitstruct/designated_initializer_with_bitstruct.c3t b/test/test_suite/bitstruct/designated_initializer_with_bitstruct.c3t index 672bc81b0..cd3aff4e5 100644 --- a/test/test_suite/bitstruct/designated_initializer_with_bitstruct.c3t +++ b/test/test_suite/bitstruct/designated_initializer_with_bitstruct.c3t @@ -69,16 +69,16 @@ entry: %ptradd1 = getelementptr inbounds i8, ptr %literal, i64 8 store ptr %f, ptr %ptradd1, align 8 store ptr %literal, ptr %f, align 8 - store i8 0, ptr %literal2, align 1 + store i8 0, ptr %literal2, align 8 %ptradd3 = getelementptr inbounds i8, ptr %literal2, i64 1 store i8 0, ptr %ptradd3, align 1 %ptradd4 = getelementptr inbounds i8, ptr %literal2, i64 8 store ptr null, ptr %ptradd4, align 8 - store i8 2, ptr %literal2, align 1 + store i8 2, ptr %literal2, align 8 %ptradd5 = getelementptr inbounds i8, ptr %literal2, i64 8 store ptr %f, ptr %ptradd5, align 8 store ptr %literal2, ptr %f2, align 8 - store i8 0, ptr %literal6, align 1 + store i8 0, ptr %literal6, align 8 %ptradd7 = getelementptr inbounds i8, ptr %literal6, i64 1 store i8 0, ptr %ptradd7, align 1 %ptradd8 = getelementptr inbounds i8, ptr %literal6, i64 8 @@ -86,11 +86,11 @@ entry: %3 = load i8, ptr %literal6, align 1 %4 = and i8 %3, 1 %5 = or i8 %4, 4 - store i8 %5, ptr %literal6, align 1 + store i8 %5, ptr %literal6, align 8 %6 = load i8, ptr %literal6, align 1 %7 = and i8 %6, -2 %8 = or i8 %7, 1 - store i8 %8, ptr %literal6, align 1 + store i8 %8, ptr %literal6, align 8 %ptradd9 = getelementptr inbounds i8, ptr %literal6, i64 1 %9 = load i8, ptr %ptradd9, align 1 %10 = and i8 %9, -2 diff --git 
a/test/test_suite/builtins/unaligned_load_store.c3t b/test/test_suite/builtins/unaligned_load_store.c3t index bcd1144f5..da382b0bb 100644 --- a/test/test_suite/builtins/unaligned_load_store.c3t +++ b/test/test_suite/builtins/unaligned_load_store.c3t @@ -32,11 +32,11 @@ entry: %value = alloca <4 x float>, align 16 store ptr null, ptr %foo, align 8 %0 = load ptr, ptr %foo, align 8 - %1 = load <4 x float>, ptr %0, align 16 + %1 = load <4 x float>, ptr %0, align 4 store <4 x float> %1, ptr %a, align 1 %2 = load ptr, ptr %foo, align 8 %3 = load <4 x float>, ptr %a, align 1 - store <4 x float> %3, ptr %2, align 16 + store <4 x float> %3, ptr %2, align 4 %4 = load ptr, ptr %foo, align 8 %5 = load <4 x float>, ptr %4, align 1 store <4 x float> %5, ptr %a, align 1 diff --git a/test/test_suite/compile_time/concat_append_extended_and_edit.c3t b/test/test_suite/compile_time/concat_append_extended_and_edit.c3t index 9be8cbd8f..ea0f4caff 100644 --- a/test/test_suite/compile_time/concat_append_extended_and_edit.c3t +++ b/test/test_suite/compile_time/concat_append_extended_and_edit.c3t @@ -448,8 +448,8 @@ noerr_block124: ; preds = %after_check122 br label %voiderr126 voiderr126: ; preds = %noerr_block124, %guard_block123, %guard_block117, %guard_block111 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal127, ptr align 16 @.__const.6, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x128, ptr align 4 %literal127, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal127, ptr align 16 @.__const.6, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x128, ptr align 16 %literal127, i32 20, i1 false) %60 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x129, ptr align 16 %x128, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x132, ptr align 16 %x129, i32 20, i1 false) @@ -754,8 +754,8 @@ entry: %indirectarg651 = alloca %"any[]", align 8 %error_var657 = alloca i64, align 8 %error_var663 
= alloca i64, align 8 - call void @llvm.memset.p0.i64(ptr align 4 %literal, i8 0, i64 32, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 4 %literal, i32 32, i1 false) + call void @llvm.memset.p0.i64(ptr align 16 %literal, i8 0, i64 32, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 16 %literal, i32 32, i1 false) %0 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x1, ptr align 16 %x, i32 32, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x2, ptr align 16 %x1, i32 32, i1 false) @@ -826,7 +826,7 @@ noerr_block15: ; preds = %after_check13 br label %voiderr voiderr: ; preds = %noerr_block15, %guard_block14, %guard_block8, %guard_block - store i32 0, ptr %literal16, align 4 + store i32 0, ptr %literal16, align 16 %ptradd17 = getelementptr inbounds i8, ptr %literal16, i64 4 store i32 0, ptr %ptradd17, align 4 %ptradd18 = getelementptr inbounds i8, ptr %literal16, i64 8 @@ -835,7 +835,7 @@ voiderr: ; preds = %noerr_block15, %gua store i32 1, ptr %ptradd19, align 4 %ptradd20 = getelementptr inbounds i8, ptr %literal16, i64 16 store i32 2, ptr %ptradd20, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x21, ptr align 4 %literal16, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x21, ptr align 16 %literal16, i32 20, i1 false) %14 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x22, ptr align 16 %x21, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x25, ptr align 16 %x22, i32 20, i1 false) @@ -906,8 +906,8 @@ noerr_block51: ; preds = %after_check49 br label %voiderr53 voiderr53: ; preds = %noerr_block51, %guard_block50, %guard_block44, %guard_block38 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal54, ptr align 16 @.__const.10, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x55, ptr align 4 %literal54, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal54, ptr align 16 
@.__const.10, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x55, ptr align 16 %literal54, i32 20, i1 false) %28 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x56, ptr align 16 %x55, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x59, ptr align 16 %x56, i32 20, i1 false) @@ -978,8 +978,8 @@ noerr_block85: ; preds = %after_check83 br label %voiderr87 voiderr87: ; preds = %noerr_block85, %guard_block84, %guard_block78, %guard_block72 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal88, ptr align 16 @.__const.12, i32 32, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x89, ptr align 4 %literal88, i32 32, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal88, ptr align 16 @.__const.12, i32 32, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x89, ptr align 16 %literal88, i32 32, i1 false) %42 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x90, ptr align 16 %x89, i32 32, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x93, ptr align 16 %x90, i32 32, i1 false) @@ -1050,7 +1050,7 @@ noerr_block119: ; preds = %after_check117 br label %voiderr121 voiderr121: ; preds = %noerr_block119, %guard_block118, %guard_block112, %guard_block106 - store i32 1, ptr %literal122, align 4 + store i32 1, ptr %literal122, align 16 %ptradd123 = getelementptr inbounds i8, ptr %literal122, i64 4 store i32 2, ptr %ptradd123, align 4 %ptradd124 = getelementptr inbounds i8, ptr %literal122, i64 8 @@ -1059,7 +1059,7 @@ voiderr121: ; preds = %noerr_block119, %gu store i32 0, ptr %ptradd125, align 4 %ptradd126 = getelementptr inbounds i8, ptr %literal122, i64 16 store i32 0, ptr %ptradd126, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x127, ptr align 4 %literal122, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x127, ptr align 16 %literal122, i32 20, i1 false) %56 = call ptr @std.io.stdout() call void 
@llvm.memcpy.p0.p0.i32(ptr align 16 %x128, ptr align 16 %x127, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x131, ptr align 16 %x128, i32 20, i1 false) @@ -1130,8 +1130,8 @@ noerr_block157: ; preds = %after_check155 br label %voiderr159 voiderr159: ; preds = %noerr_block157, %guard_block156, %guard_block150, %guard_block144 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal160, ptr align 16 @.__const.15, i32 16, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x161, ptr align 4 %literal160, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal160, ptr align 16 @.__const.15, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x161, ptr align 16 %literal160, i32 16, i1 false) %70 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x162, ptr align 16 %x161, i32 16, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x165, ptr align 16 %x162, i32 16, i1 false) @@ -1202,7 +1202,7 @@ noerr_block191: ; preds = %after_check189 br label %voiderr193 voiderr193: ; preds = %noerr_block191, %guard_block190, %guard_block184, %guard_block178 - store i32 1, ptr %literal194, align 4 + store i32 1, ptr %literal194, align 16 %ptradd195 = getelementptr inbounds i8, ptr %literal194, i64 4 store i32 2, ptr %ptradd195, align 4 %ptradd196 = getelementptr inbounds i8, ptr %literal194, i64 8 @@ -1215,7 +1215,7 @@ voiderr193: ; preds = %noerr_block191, %gu store i32 1, ptr %ptradd199, align 4 %ptradd200 = getelementptr inbounds i8, ptr %literal194, i64 24 store i32 0, ptr %ptradd200, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x201, ptr align 4 %literal194, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x201, ptr align 16 %literal194, i32 28, i1 false) %84 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x202, ptr align 16 %x201, i32 28, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x205, ptr align 16 %x202, i32 28, i1 false) 
@@ -1286,8 +1286,8 @@ noerr_block231: ; preds = %after_check229 br label %voiderr233 voiderr233: ; preds = %noerr_block231, %guard_block230, %guard_block224, %guard_block218 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal234, ptr align 16 @.__const.18, i32 32, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x235, ptr align 4 %literal234, i32 32, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal234, ptr align 16 @.__const.18, i32 32, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x235, ptr align 16 %literal234, i32 32, i1 false) %98 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x236, ptr align 16 %x235, i32 32, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x239, ptr align 16 %x236, i32 32, i1 false) @@ -1358,7 +1358,7 @@ noerr_block265: ; preds = %after_check263 br label %voiderr267 voiderr267: ; preds = %noerr_block265, %guard_block264, %guard_block258, %guard_block252 - store i32 99, ptr %literal268, align 4 + store i32 99, ptr %literal268, align 16 %ptradd269 = getelementptr inbounds i8, ptr %literal268, i64 4 store i32 0, ptr %ptradd269, align 4 %ptradd270 = getelementptr inbounds i8, ptr %literal268, i64 8 @@ -1371,7 +1371,7 @@ voiderr267: ; preds = %noerr_block265, %gu store i32 1, ptr %ptradd273, align 4 %ptradd274 = getelementptr inbounds i8, ptr %literal268, i64 24 store i32 2, ptr %ptradd274, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x275, ptr align 4 %literal268, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x275, ptr align 16 %literal268, i32 28, i1 false) %112 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x276, ptr align 16 %x275, i32 28, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x279, ptr align 16 %x276, i32 28, i1 false) @@ -1442,15 +1442,15 @@ noerr_block305: ; preds = %after_check303 br label %voiderr307 voiderr307: ; preds = %noerr_block305, %guard_block304, %guard_block298, 
%guard_block292 - call void @llvm.memset.p0.i64(ptr align 4 %literal308, i8 0, i64 40, i1 false) - store i32 99, ptr %literal308, align 4 + call void @llvm.memset.p0.i64(ptr align 16 %literal308, i8 0, i64 40, i1 false) + store i32 99, ptr %literal308, align 16 %ptradd309 = getelementptr inbounds i8, ptr %literal308, i64 12 store i32 1, ptr %ptradd309, align 4 %ptradd310 = getelementptr inbounds i8, ptr %literal308, i64 20 store i32 99, ptr %ptradd310, align 4 %ptradd311 = getelementptr inbounds i8, ptr %literal308, i64 32 store i32 1, ptr %ptradd311, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x312, ptr align 4 %literal308, i32 40, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x312, ptr align 16 %literal308, i32 40, i1 false) %126 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x313, ptr align 16 %x312, i32 40, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x316, ptr align 16 %x313, i32 40, i1 false) @@ -1521,8 +1521,8 @@ noerr_block342: ; preds = %after_check340 br label %voiderr344 voiderr344: ; preds = %noerr_block342, %guard_block341, %guard_block335, %guard_block329 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal345, ptr align 16 @.__const.22, i32 16, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x346, ptr align 4 %literal345, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal345, ptr align 16 @.__const.22, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x346, ptr align 16 %literal345, i32 16, i1 false) %140 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x347, ptr align 16 %x346, i32 16, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x350, ptr align 16 %x347, i32 16, i1 false) @@ -1593,8 +1593,8 @@ noerr_block376: ; preds = %after_check374 br label %voiderr378 voiderr378: ; preds = %noerr_block376, %guard_block375, %guard_block369, %guard_block363 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 
%literal379, ptr align 16 @.__const.24, i32 16, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x380, ptr align 4 %literal379, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal379, ptr align 16 @.__const.24, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x380, ptr align 16 %literal379, i32 16, i1 false) %154 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x381, ptr align 16 %x380, i32 16, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x384, ptr align 16 %x381, i32 16, i1 false) @@ -1665,7 +1665,7 @@ noerr_block410: ; preds = %after_check408 br label %voiderr412 voiderr412: ; preds = %noerr_block410, %guard_block409, %guard_block403, %guard_block397 - store i32 0, ptr %literal413, align 4 + store i32 0, ptr %literal413, align 16 %ptradd414 = getelementptr inbounds i8, ptr %literal413, i64 4 store i32 0, ptr %ptradd414, align 4 %ptradd415 = getelementptr inbounds i8, ptr %literal413, i64 8 @@ -1674,7 +1674,7 @@ voiderr412: ; preds = %noerr_block410, %gu store i32 98, ptr %ptradd416, align 4 %ptradd417 = getelementptr inbounds i8, ptr %literal413, i64 16 store i32 91, ptr %ptradd417, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x418, ptr align 4 %literal413, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x418, ptr align 16 %literal413, i32 20, i1 false) %168 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x419, ptr align 16 %x418, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x422, ptr align 16 %x419, i32 20, i1 false) @@ -1745,8 +1745,8 @@ noerr_block448: ; preds = %after_check446 br label %voiderr450 voiderr450: ; preds = %noerr_block448, %guard_block447, %guard_block441, %guard_block435 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal451, ptr align 16 @.__const.27, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x452, ptr align 4 %literal451, i32 20, i1 false) + call void 
@llvm.memcpy.p0.p0.i32(ptr align 16 %literal451, ptr align 16 @.__const.27, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x452, ptr align 16 %literal451, i32 20, i1 false) %182 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x453, ptr align 16 %x452, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x456, ptr align 16 %x453, i32 20, i1 false) @@ -1817,8 +1817,8 @@ noerr_block482: ; preds = %after_check480 br label %voiderr484 voiderr484: ; preds = %noerr_block482, %guard_block481, %guard_block475, %guard_block469 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal485, ptr align 16 @.__const.29, i32 28, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x486, ptr align 4 %literal485, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal485, ptr align 16 @.__const.29, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x486, ptr align 16 %literal485, i32 28, i1 false) %196 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x487, ptr align 16 %x486, i32 28, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x490, ptr align 16 %x487, i32 28, i1 false) @@ -1889,7 +1889,7 @@ noerr_block516: ; preds = %after_check514 br label %voiderr518 voiderr518: ; preds = %noerr_block516, %guard_block515, %guard_block509, %guard_block503 - store i32 99, ptr %literal519, align 4 + store i32 99, ptr %literal519, align 16 %ptradd520 = getelementptr inbounds i8, ptr %literal519, i64 4 store i32 0, ptr %ptradd520, align 4 %ptradd521 = getelementptr inbounds i8, ptr %literal519, i64 8 @@ -1902,7 +1902,7 @@ voiderr518: ; preds = %noerr_block516, %gu store i32 98, ptr %ptradd524, align 4 %ptradd525 = getelementptr inbounds i8, ptr %literal519, i64 24 store i32 91, ptr %ptradd525, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x526, ptr align 4 %literal519, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x526, ptr align 16 
%literal519, i32 28, i1 false) %210 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x527, ptr align 16 %x526, i32 28, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x530, ptr align 16 %x527, i32 28, i1 false) @@ -1973,7 +1973,7 @@ noerr_block556: ; preds = %after_check554 br label %voiderr558 voiderr558: ; preds = %noerr_block556, %guard_block555, %guard_block549, %guard_block543 - store i32 0, ptr %literal559, align 4 + store i32 0, ptr %literal559, align 16 %ptradd560 = getelementptr inbounds i8, ptr %literal559, i64 4 store i32 0, ptr %ptradd560, align 4 %ptradd561 = getelementptr inbounds i8, ptr %literal559, i64 8 @@ -1992,7 +1992,7 @@ voiderr558: ; preds = %noerr_block556, %gu store i32 0, ptr %ptradd567, align 4 %ptradd568 = getelementptr inbounds i8, ptr %ptradd565, i64 4 store i32 5, ptr %ptradd568, align 4 - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x569, ptr align 4 %literal559, i32 36, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x569, ptr align 16 %literal559, i32 36, i1 false) %224 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x570, ptr align 16 %x569, i32 36, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x573, ptr align 16 %x570, i32 36, i1 false) diff --git a/test/test_suite/compile_time/concat_test.c3t b/test/test_suite/compile_time/concat_test.c3t index 97aa5439a..fa5274aed 100644 --- a/test/test_suite/compile_time/concat_test.c3t +++ b/test/test_suite/compile_time/concat_test.c3t @@ -252,7 +252,7 @@ entry: %indirectarg = alloca %"any[]", align 8 %error_var3 = alloca i64, align 8 %error_var9 = alloca i64, align 8 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const.5, i32 28, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 @.__const.5, i32 28, i1 false) %0 = insertvalue %"int[]" undef, ptr %literal, 0 %1 = insertvalue %"int[]" %0, i64 7, 1 %2 = call ptr @std.io.stdout() @@ -341,8 +341,8 @@ 
entry: %indirectarg = alloca %"any[]", align 8 %error_var4 = alloca i64, align 8 %error_var10 = alloca i64, align 8 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const.7, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 4 %literal, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 @.__const.7, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 16 %literal, i32 20, i1 false) %0 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x1, ptr align 16 %x, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x2, ptr align 16 %x1, i32 20, i1 false) @@ -430,8 +430,8 @@ entry: %indirectarg = alloca %"any[]", align 8 %error_var4 = alloca i64, align 8 %error_var10 = alloca i64, align 8 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const.9, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 4 %literal, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 @.__const.9, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 16 %literal, i32 20, i1 false) %0 = call ptr @std.io.stdout() call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x1, ptr align 16 %x, i32 20, i1 false) call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x2, ptr align 16 %x1, i32 20, i1 false) diff --git a/test/test_suite/compile_time/ct_declaration_in_if.c3t b/test/test_suite/compile_time/ct_declaration_in_if.c3t index e1b4671cf..c75140e26 100644 --- a/test/test_suite/compile_time/ct_declaration_in_if.c3t +++ b/test/test_suite/compile_time/ct_declaration_in_if.c3t @@ -25,16 +25,19 @@ fn void main() define void @test.main() #0 { entry: %literal = alloca [3 x %Vector3], align 16 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const, i32 36, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 
@.__const, i32 36, i1 false) %0 = insertvalue %"Vector3[]" undef, ptr %literal, 0 %1 = insertvalue %"Vector3[]" %0, i64 3, 1 br label %loop.cond + loop.cond: ; preds = %loop.inc, %entry %2 = extractvalue %"Vector3[]" %1, 1 %lt = icmp ult i64 0, %2 br i1 %lt, label %loop.inc, label %loop.exit + loop.inc: ; preds = %loop.cond br label %loop.cond + loop.exit: ; preds = %loop.cond ret void } diff --git a/test/test_suite/compile_time/ct_memberof.c3t b/test/test_suite/compile_time/ct_memberof.c3t index cba789129..7256f8e82 100644 --- a/test/test_suite/compile_time/ct_memberof.c3t +++ b/test/test_suite/compile_time/ct_memberof.c3t @@ -103,9 +103,9 @@ fn void main() define void @test.hello(i32 %0, double %1, i64 %2, i64 %3, ptr %4, i64 %5) #0 { entry: - %d = alloca [4 x i32], align 8 + %d = alloca [4 x i32], align 16 %args = alloca %"any[]", align 8 - store i64 %2, ptr %d, align 8 + store i64 %2, ptr %d, align 16 %ptradd = getelementptr inbounds i8, ptr %d, i64 8 store i64 %3, ptr %ptradd, align 8 store ptr %4, ptr %args, align 8 diff --git a/test/test_suite/compile_time/more_untyped_conversions.c3t b/test/test_suite/compile_time/more_untyped_conversions.c3t index 01fa92f3d..40455befb 100644 --- a/test/test_suite/compile_time/more_untyped_conversions.c3t +++ b/test/test_suite/compile_time/more_untyped_conversions.c3t @@ -25,7 +25,7 @@ entry: %z_slice = alloca [2 x [2 x i32]], align 16 %f = alloca %Foo, align 4 %y = alloca [2 x i32], align 4 - store i32 1, ptr %literal, align 4 + store i32 1, ptr %literal, align 16 %ptradd = getelementptr inbounds i8, ptr %literal, i64 4 store i32 2, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %literal, i64 8 @@ -35,7 +35,7 @@ entry: %0 = insertvalue %"int[2][]" undef, ptr %literal, 0 %1 = insertvalue %"int[2][]" %0, i64 2, 1 store %"int[2][]" %1, ptr %z, align 8 - store i32 1, ptr %z_slice, align 4 + store i32 1, ptr %z_slice, align 16 %ptradd3 = getelementptr inbounds i8, ptr %z_slice, i64 4 store i32 2, ptr %ptradd3, 
align 4 %ptradd4 = getelementptr inbounds i8, ptr %z_slice, i64 8 diff --git a/test/test_suite/compile_time/untyped_conversions.c3t b/test/test_suite/compile_time/untyped_conversions.c3t index 37eefc4f9..15c69f03a 100644 --- a/test/test_suite/compile_time/untyped_conversions.c3t +++ b/test/test_suite/compile_time/untyped_conversions.c3t @@ -31,8 +31,8 @@ fn void main() %any = type { ptr, i64 } @"$ct.test.Foo" = linkonce global %.introspect { i8 10, i64 0, ptr null, i64 8, i64 0, i64 2, [0 x i64] zeroinitializer }, align 8 -@"$ct.test.Int2V" = linkonce global %.introspect { i8 18, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.v2$int" to i64), i64 0, [0 x i64] zeroinitializer }, align 8 -@"$ct.v2$int" = linkonce global %.introspect { i8 17, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.int" to i64), i64 2, [0 x i64] zeroinitializer }, align 8 +@"$ct.test.Int2V" = linkonce global %.introspect { i8 18, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.siv2$int" to i64), i64 0, [0 x i64] zeroinitializer }, align 8 +@"$ct.siv2$int" = linkonce global %.introspect { i8 17, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.int" to i64), i64 2, [0 x i64] zeroinitializer }, align 8 @"$ct.int" = linkonce global %.introspect { i8 2, i64 0, ptr null, i64 4, i64 0, i64 0, [0 x i64] zeroinitializer }, align 8 @.str = private unnamed_addr constant [9 x i8] c"%s %s %s\00", align 1 @"$ct.a2$int" = linkonce global %.introspect { i8 15, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.int" to i64), i64 2, [0 x i64] zeroinitializer }, align 8 diff --git a/test/test_suite/debug_symbols/defer_macro.c3t b/test/test_suite/debug_symbols/defer_macro.c3t index 6de671826..a3745514c 100644 --- a/test/test_suite/debug_symbols/defer_macro.c3t +++ b/test/test_suite/debug_symbols/defer_macro.c3t @@ -421,118 +421,119 @@ if.exit: ; preds = %entry %ptradd = getelementptr inbounds i8, ptr %allocator10, i64 8, !dbg !165 %12 = load i64, ptr %ptradd, align 8, !dbg !165 %13 = inttoptr i64 %12 to ptr, !dbg 
!165 - %type = load ptr, ptr %.cachedtype, align 8 - %14 = icmp eq ptr %13, %type - br i1 %14, label %cache_hit, label %cache_miss + %14 = load ptr, ptr %.cachedtype, align 8 + %15 = icmp eq ptr %13, %14 + br i1 %15, label %cache_hit, label %cache_miss cache_miss: ; preds = %if.exit - %15 = call ptr @.dyn_search(ptr %13, ptr @"$sel.acquire") - store ptr %15, ptr %.inlinecache, align 8 + %16 = call ptr @.dyn_search(ptr %13, ptr @"$sel.acquire") + store ptr %16, ptr %.inlinecache, align 8 store ptr %13, ptr %.cachedtype, align 8 - br label %16 + br label %18 cache_hit: ; preds = %if.exit - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %16 + %17 = load ptr, ptr %.inlinecache, align 8 + br label %18 -16: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %15, %cache_miss ] - %17 = icmp eq ptr %fn_phi, null - br i1 %17, label %missing_function, label %match +18: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %17, %cache_hit ], [ %16, %cache_miss ] + %19 = icmp eq ptr %fn_phi, null + br i1 %19, label %missing_function, label %match -missing_function: ; preds = %16 - %18 = load ptr, ptr @std.core.builtin.panic, align 8, !dbg !167 - call void %18(ptr @.panic_msg, i64 44, ptr @.file, +missing_function: ; preds = %18 + %20 = load ptr, ptr @std.core.builtin.panic, align 8, !dbg !167 + call void %20(ptr @.panic_msg, i64 44, ptr @.file, unreachable, !dbg !167 -match: ; preds = %16 - %19 = load ptr, ptr %allocator10, align 8 - %20 = load i64, ptr %size, align 8 - %21 = call i64 %fn_phi(ptr %retparam, ptr %19, i64 %20, i32 0, i64 0), !dbg !167 - %not_err = icmp eq i64 %21, 0, !dbg !167 - %22 = call i1 @llvm.expect.i1(i1 %not_err, i1 true), !dbg !167 - br i1 %22, label %after_check, label %assign_optional, !dbg !167 +match: ; preds = %18 + %21 = load ptr, ptr %allocator10, align 8 + %22 = load i64, ptr %size, align 8 + %23 = call i64 %fn_phi(ptr %retparam, ptr %21, i64 %22, i32 0, i64 0), !dbg !167 + %not_err = icmp eq 
i64 %23, 0, !dbg !167 + %24 = call i1 @llvm.expect.i1(i1 %not_err, i1 true), !dbg !167 + br i1 %24, label %after_check, label %assign_optional, !dbg !167 assign_optional: ; preds = %match - store i64 %21, ptr %error_var, align 8, !dbg !167 + store i64 %23, ptr %error_var, align 8, !dbg !167 br label %panic_block, !dbg !167 after_check: ; preds = %match - %23 = load ptr, ptr %retparam, align 8, !dbg !167 - store ptr %23, ptr %blockret11, align 8, !dbg !167 + %25 = load ptr, ptr %retparam, align 8, !dbg !167 + store ptr %25, ptr %blockret11, align 8, !dbg !167 br label %expr_block.exit, !dbg !167 expr_block.exit: ; preds = %after_check, %if.then - %24 = load ptr, ptr %blockret11, align 8, !dbg !167 - %25 = load i64, ptr %elements8, align 8, !dbg !168 - %add = add i64 0, %25, !dbg !168 + %26 = load ptr, ptr %blockret11, align 8, !dbg !167 + %27 = load i64, ptr %elements8, align 8, !dbg !168 + %add = add i64 0, %27, !dbg !168 %size12 = sub i64 %add, 0, !dbg !168 - %26 = insertvalue %"char[][]" undef, ptr %24, 0, !dbg !168 - %27 = insertvalue %"char[][]" %26, i64 %size12, 1, !dbg !168 + %28 = insertvalue %"char[][]" undef, ptr %26, 0, !dbg !168 + %29 = insertvalue %"char[][]" %28, i64 %size12, 1, !dbg !168 br label %noerr_block, !dbg !168 panic_block: ; preds = %assign_optional - %28 = insertvalue %any undef, ptr %error_var, 0, !dbg !168 - %29 = insertvalue %any %28, i64 ptrtoint (ptr @"$ct.fault" to i64), 1, !dbg !168 - store %any %29, ptr %varargslots, align 16 - %30 = insertvalue %"any[]" undef, ptr %varargslots, 0 - %"$$temp" = insertvalue %"any[]" %30, i64 1, 1 + %30 = insertvalue %any undef, ptr %error_var, 0, !dbg !168 + %31 = insertvalue %any %30, i64 ptrtoint (ptr @"$ct.fault" to i64), 1, !dbg !168 + store %any %31, ptr %varargslots, align 16 + %32 = insertvalue %"any[]" undef, ptr %varargslots, 0 + %"$$temp" = insertvalue %"any[]" %32, i64 1, 1 store %"any[]" %"$$temp", ptr %indirectarg, align 8 call void @std.core.builtin.panicf(ptr @.panic_msg.1, i64 36, ptr 
@.file, i64 16, ptr @.func, i64 6, i32 287, ptr byval(%"any[]") align 8 %indirectarg) #5, !dbg !157 unreachable, !dbg !157 noerr_block: ; preds = %expr_block.exit - store %"char[][]" %27, ptr %list5, align 8, !dbg !157 + store %"char[][]" %29, ptr %list5, align 8, !dbg !157 + store i32 0, ptr %i, align 4, !dbg !172 br label %loop.cond, !dbg !172 loop.cond: ; preds = %loop.exit, %noerr_block - %31 = load i32, ptr %i, align 4, !dbg !173 - %32 = load i32, ptr %argc2, align 4, !dbg !174 - %lt = icmp slt i32 %31, %32, !dbg !173 + %33 = load i32, ptr %i, align 4, !dbg !173 + %34 = load i32, ptr %argc2, align 4, !dbg !174 + %lt = icmp slt i32 %33, %34, !dbg !173 br i1 %lt, label %loop.body, label %loop.exit25, !dbg !173 loop.body: ; preds = %loop.cond - %33 = load ptr, ptr %argv3, align 8, !dbg !178 - %34 = load i32, ptr %i, align 4, !dbg !179 - %sext13 = sext i32 %34 to i64, !dbg !179 - %ptroffset = getelementptr inbounds [8 x i8], ptr %33, i64 %sext13, !dbg !179 - %35 = load ptr, ptr %ptroffset, align 8, !dbg !179 - store ptr %35, ptr %arg, align 8, !dbg !179 + %35 = load ptr, ptr %argv3, align 8, !dbg !178 + %36 = load i32, ptr %i, align 4, !dbg !179 + %sext13 = sext i32 %36 to i64, !dbg !179 + %ptroffset = getelementptr inbounds [8 x i8], ptr %35, i64 %sext13, !dbg !179 + %37 = load ptr, ptr %ptroffset, align 8, !dbg !179 + store ptr %37, ptr %arg, align 8, !dbg !179 store i64 0, ptr %len, align 8, !dbg !182 - %36 = load ptr, ptr %arg, align 8, !dbg !183 - %37 = load ptr, ptr %arg, align 8 - store ptr %37, ptr %ptr, align 8 + %38 = load ptr, ptr %arg, align 8, !dbg !183 + %39 = load ptr, ptr %arg, align 8 + store ptr %39, ptr %ptr, align 8 store i64 0, ptr %len15, align 8, !dbg !188 br label %loop.cond16, !dbg !189 loop.cond16: ; preds = %loop.body18, %loop.body - %38 = load ptr, ptr %ptr, align 8, !dbg !190 - %39 = load i64, ptr %len15, align 8, !dbg !192 - %ptradd17 = getelementptr inbounds i8, ptr %38, i64 %39, !dbg !192 - %40 = load i8, ptr %ptradd17, align 1, 
!dbg !192 - %i2b = icmp ne i8 %40, 0, !dbg !192 + %40 = load ptr, ptr %ptr, align 8, !dbg !190 + %41 = load i64, ptr %len15, align 8, !dbg !192 + %ptradd17 = getelementptr inbounds i8, ptr %40, i64 %41, !dbg !192 + %42 = load i8, ptr %ptradd17, align 1, !dbg !192 + %i2b = icmp ne i8 %42, 0, !dbg !192 br i1 %i2b, label %loop.body18, label %loop.exit, !dbg !192 loop.body18: ; preds = %loop.cond16 - %41 = load i64, ptr %len15, align 8, !dbg !193 - %add19 = add i64 %41, 1, !dbg !193 + %43 = load i64, ptr %len15, align 8, !dbg !193 + %add19 = add i64 %43, 1, !dbg !193 store i64 %add19, ptr %len15, align 8, !dbg !193 br label %loop.cond16, !dbg !193 loop.exit: ; preds = %loop.cond16 - %42 = load i64, ptr %len15, align 8, !dbg !194 - %add20 = add i64 0, %42, !dbg !194 + %44 = load i64, ptr %len15, align 8, !dbg !194 + %add20 = add i64 0, %44, !dbg !194 %size21 = sub i64 %add20, 0, !dbg !194 - %43 = insertvalue %"char[]" undef, ptr %36, 0, !dbg !194 - %44 = insertvalue %"char[]" %43, i64 %size21, 1, !dbg !194 - %45 = load ptr, ptr %list5, align 8, !dbg !195 - %46 = load i32, ptr %i, align 4, !dbg !196 - %sext22 = sext i32 %46 to i64, !dbg !196 - %ptroffset23 = getelementptr inbounds [16 x i8], ptr %45, i64 %sext22, !dbg !196 - store %"char[]" %44, ptr %ptroffset23, align 8, !dbg !196 - %47 = load i32, ptr %i, align 4, !dbg !197 - %add24 = add i32 %47, 1, !dbg !197 + %45 = insertvalue %"char[]" undef, ptr %38, 0, !dbg !194 + %46 = insertvalue %"char[]" %45, i64 %size21, 1, !dbg !194 + %47 = load ptr, ptr %list5, align 8, !dbg !195 + %48 = load i32, ptr %i, align 4, !dbg !196 + %sext22 = sext i32 %48 to i64, !dbg !196 + %ptroffset23 = getelementptr inbounds [16 x i8], ptr %47, i64 %sext22, !dbg !196 + store %"char[]" %46, ptr %ptroffset23, align 8, !dbg !196 + %49 = load i32, ptr %i, align 4, !dbg !197 + %add24 = add i32 %49, 1, !dbg !197 store i32 %add24, ptr %i, align 4, !dbg !197 br label %loop.cond, !dbg !197 @@ -541,15 +542,15 @@ loop.exit25: ; preds = %loop.cond %lo = 
load ptr, ptr %list, align 8, !dbg !199 %ptradd26 = getelementptr inbounds i8, ptr %list, i64 8, !dbg !199 %hi = load i64, ptr %ptradd26, align 8, !dbg !199 - %48 = call i32 @test.main(ptr %lo, i64 %hi), !dbg !200 - store i32 %48, ptr %blockret, align 4, !dbg !200 - %49 = load ptr, ptr %list, align 8, !dbg !201 - call void @std.core.mem.free(ptr %49) #4, !dbg !203 + %50 = call i32 @test.main(ptr %lo, i64 %hi), !dbg !200 + store i32 %50, ptr %blockret, align 4, !dbg !200 + %51 = load ptr, ptr %list, align 8, !dbg !201 + call void @std.core.mem.free(ptr %51) #4, !dbg !203 br label %expr_block.exit27, !dbg !203 expr_block.exit27: ; preds = %loop.exit25 - %50 = load i32, ptr %blockret, align 4, !dbg !203 - ret i32 %50, !dbg !203 + %52 = load i32, ptr %blockret, align 4, !dbg !203 + ret i32 %52, !dbg !203 } declare { i32, ptr } @attach.to_scope() #0 diff --git a/test/test_suite/defer/defer_catch_mix.c3t b/test/test_suite/defer/defer_catch_mix.c3t index 879b06cea..debc2d327 100644 --- a/test/test_suite/defer/defer_catch_mix.c3t +++ b/test/test_suite/defer/defer_catch_mix.c3t @@ -313,77 +313,77 @@ if.exit: ; preds = %entry %ptradd = getelementptr inbounds i8, ptr %allocator2, i64 8 %1 = load i64, ptr %ptradd, align 8 %2 = inttoptr i64 %1 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %3 = icmp eq ptr %2, %type - br i1 %3, label %cache_hit, label %cache_miss + %3 = load ptr, ptr %.cachedtype, align 8 + %4 = icmp eq ptr %2, %3 + br i1 %4, label %cache_hit, label %cache_miss cache_miss: ; preds = %if.exit - %4 = call ptr @.dyn_search(ptr %2, ptr @"$sel.acquire") - store ptr %4, ptr %.inlinecache, align 8 + %5 = call ptr @.dyn_search(ptr %2, ptr @"$sel.acquire") + store ptr %5, ptr %.inlinecache, align 8 store ptr %2, ptr %.cachedtype, align 8 - br label %5 + br label %7 cache_hit: ; preds = %if.exit - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %5 + %6 = load ptr, ptr %.inlinecache, align 8 + br label %7 -5: ; preds = %cache_hit, %cache_miss - 
%fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %4, %cache_miss ] - %6 = icmp eq ptr %fn_phi, null - br i1 %6, label %missing_function, label %match +7: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %6, %cache_hit ], [ %5, %cache_miss ] + %8 = icmp eq ptr %fn_phi, null + br i1 %8, label %missing_function, label %match -missing_function: ; preds = %5 +missing_function: ; preds = %7 store %"char[]" { ptr @.panic_msg, i64 44 }, ptr %taddr, align 8 - %7 = load [2 x i64], ptr %taddr, align 8 + %9 = load [2 x i64], ptr %taddr, align 8 store %"char[]" { ptr @.file, i64 16 }, ptr %taddr3, align 8 - %8 = load [2 x i64], ptr %taddr3, align 8 + %10 = load [2 x i64], ptr %taddr3, align 8 store %"char[]" { ptr @.func, i64 4 }, ptr %taddr4, align 8 - %9 = load [2 x i64], ptr %taddr4, align 8 - %10 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %10([2 x i64] %7, [2 x i64] %8, [2 x i64] %9, i32 98) #4 + %11 = load [2 x i64], ptr %taddr4, align 8 + %12 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %12([2 x i64] %9, [2 x i64] %10, [2 x i64] %11, i32 98) #4 unreachable -match: ; preds = %5 - %11 = load ptr, ptr %allocator2, align 8 - %12 = call i64 %fn_phi(ptr %retparam, ptr %11, i64 12, i32 1, i64 0) - %not_err = icmp eq i64 %12, 0 - %13 = call i1 @llvm.expect.i1(i1 %not_err, i1 true) - br i1 %13, label %after_check, label %assign_optional +match: ; preds = %7 + %13 = load ptr, ptr %allocator2, align 8 + %14 = call i64 %fn_phi(ptr %retparam, ptr %13, i64 12, i32 1, i64 0) + %not_err = icmp eq i64 %14, 0 + %15 = call i1 @llvm.expect.i1(i1 %not_err, i1 true) + br i1 %15, label %after_check, label %assign_optional assign_optional: ; preds = %match - store i64 %12, ptr %error_var, align 8 + store i64 %14, ptr %error_var, align 8 br label %panic_block after_check: ; preds = %match - %14 = load ptr, ptr %retparam, align 8 - %15 = insertvalue %"char[]" undef, ptr %14, 0 - %16 = insertvalue %"char[]" %15, i64 12, 1 + %16 = load ptr, ptr %retparam, 
align 8 + %17 = insertvalue %"char[]" undef, ptr %16, 0 + %18 = insertvalue %"char[]" %17, i64 12, 1 br label %noerr_block panic_block: ; preds = %assign_optional - %17 = insertvalue %any undef, ptr %error_var, 0 - %18 = insertvalue %any %17, i64 ptrtoint (ptr @"$ct.fault" to i64), 1 + %19 = insertvalue %any undef, ptr %error_var, 0 + %20 = insertvalue %any %19, i64 ptrtoint (ptr @"$ct.fault" to i64), 1 store %"char[]" { ptr @.panic_msg.4, i64 36 }, ptr %taddr5, align 8 - %19 = load [2 x i64], ptr %taddr5, align 8 + %21 = load [2 x i64], ptr %taddr5, align 8 store %"char[]" { ptr @.file, i64 16 }, ptr %taddr6, align 8 - %20 = load [2 x i64], ptr %taddr6, align 8 + %22 = load [2 x i64], ptr %taddr6, align 8 store %"char[]" { ptr @.func, i64 4 }, ptr %taddr7, align 8 - %21 = load [2 x i64], ptr %taddr7, align 8 - store %any %18, ptr %varargslots, align 8 - %22 = insertvalue %"any[]" undef, ptr %varargslots, 0 - %"$$temp" = insertvalue %"any[]" %22, i64 1, 1 + %23 = load [2 x i64], ptr %taddr7, align 8 + store %any %20, ptr %varargslots, align 8 + %24 = insertvalue %"any[]" undef, ptr %varargslots, 0 + %"$$temp" = insertvalue %"any[]" %24, i64 1, 1 store %"any[]" %"$$temp", ptr %taddr8, align 8 - %23 = load [2 x i64], ptr %taddr8, align 8 - call void @std.core.builtin.panicf([2 x i64] %19, [2 x i64] %20, [2 x i64] %21, i32 262, [2 x i64] %23) #4 + %25 = load [2 x i64], ptr %taddr8, align 8 + call void @std.core.builtin.panicf([2 x i64] %21, [2 x i64] %22, [2 x i64] %23, i32 262, [2 x i64] %25) #4 unreachable noerr_block: ; preds = %after_check - store %"char[]" %16, ptr %buffer, align 8 + store %"char[]" %18, ptr %buffer, align 8 store i64 0, ptr %buffer.f, align 8 %optval = load i64, ptr %buffer.f, align 8 %not_err9 = icmp eq i64 %optval, 0 - %24 = call i1 @llvm.expect.i1(i1 %not_err9, i1 true) - br i1 %24, label %after_check11, label %assign_optional10 + %26 = call i1 @llvm.expect.i1(i1 %not_err9, i1 true) + br i1 %26, label %after_check11, label %assign_optional10 
assign_optional10: ; preds = %noerr_block store i64 %optval, ptr %buffer.f, align 8 @@ -391,15 +391,15 @@ assign_optional10: ; preds = %noerr_block after_check11: ; preds = %noerr_block store %"char[]" { ptr @.str.5, i64 13 }, ptr %taddr13, align 8 - %25 = load [2 x i64], ptr %taddr13, align 8 - %26 = load [2 x i64], ptr %buffer, align 8 - %27 = call i64 @test.fileReader(ptr %retparam12, [2 x i64] %25, [2 x i64] %26) - %not_err14 = icmp eq i64 %27, 0 - %28 = call i1 @llvm.expect.i1(i1 %not_err14, i1 true) - br i1 %28, label %after_check16, label %assign_optional15 + %27 = load [2 x i64], ptr %taddr13, align 8 + %28 = load [2 x i64], ptr %buffer, align 8 + %29 = call i64 @test.fileReader(ptr %retparam12, [2 x i64] %27, [2 x i64] %28) + %not_err14 = icmp eq i64 %29, 0 + %30 = call i1 @llvm.expect.i1(i1 %not_err14, i1 true) + br i1 %30, label %after_check16, label %assign_optional15 assign_optional15: ; preds = %after_check11 - store i64 %27, ptr %buffer.f, align 8 + store i64 %29, ptr %buffer.f, align 8 br label %after_assign after_check16: ; preds = %after_check11 diff --git a/test/test_suite/dynamic/inherit_linux.c3t b/test/test_suite/dynamic/inherit_linux.c3t index f3ef55adf..64890b751 100644 --- a/test/test_suite/dynamic/inherit_linux.c3t +++ b/test/test_suite/dynamic/inherit_linux.c3t @@ -80,65 +80,65 @@ entry: %ptradd = getelementptr inbounds i8, ptr %z, i64 8 %3 = load i64, ptr %ptradd, align 8 %4 = inttoptr i64 %3 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %5 = icmp eq ptr %4, %type - br i1 %5, label %cache_hit, label %cache_miss + %5 = load ptr, ptr %.cachedtype, align 8 + %6 = icmp eq ptr %4, %5 + br i1 %6, label %cache_hit, label %cache_miss cache_miss: ; preds = %entry - %6 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") - store ptr %6, ptr %.inlinecache, align 8 + %7 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") + store ptr %7, ptr %.inlinecache, align 8 store ptr %4, ptr %.cachedtype, align 8 - br label %7 + br label %9 cache_hit: ; 
preds = %entry - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %7 + %8 = load ptr, ptr %.inlinecache, align 8 + br label %9 -7: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %6, %cache_miss ] - %8 = icmp eq ptr %fn_phi, null - br i1 %8, label %missing_function, label %match +9: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %8, %cache_hit ], [ %7, %cache_miss ] + %10 = icmp eq ptr %fn_phi, null + br i1 %10, label %missing_function, label %match -missing_function: ; preds = %7 - %9 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %9(ptr @.panic_msg, i64 41, ptr @.file +missing_function: ; preds = %9 + %11 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %11(ptr @.panic_msg, i64 41, ptr @.file unreachable -match: ; preds = %7 - %10 = load ptr, ptr %z, align 8 - call void %fn_phi(ptr %10) - %11 = load %any, ptr %z, align 8 - store %any %11, ptr %w, align 8 +match: ; preds = %9 + %12 = load ptr, ptr %z, align 8 + call void %fn_phi(ptr %12) + %13 = load %any, ptr %z, align 8 + store %any %13, ptr %w, align 8 %ptradd1 = getelementptr inbounds i8, ptr %w, i64 8 - %12 = load i64, ptr %ptradd1, align 8 - %13 = inttoptr i64 %12 to ptr - %type4 = load ptr, ptr %.cachedtype3, align 8 - %14 = icmp eq ptr %13, %type4 - br i1 %14, label %cache_hit6, label %cache_miss5 + %14 = load i64, ptr %ptradd1, align 8 + %15 = inttoptr i64 %14 to ptr + %16 = load ptr, ptr %.cachedtype3, align 8 + %17 = icmp eq ptr %15, %16 + br i1 %17, label %cache_hit5, label %cache_miss4 -cache_miss5: ; preds = %match - %15 = call ptr @.dyn_search(ptr %13, ptr @"$sel.tesT") - store ptr %15, ptr %.inlinecache2, align 8 - store ptr %13, ptr %.cachedtype3, align 8 - br label %16 +cache_miss4: ; preds = %match + %18 = call ptr @.dyn_search(ptr %15, ptr @"$sel.tesT") + store ptr %18, ptr %.inlinecache2, align 8 + store ptr %15, ptr %.cachedtype3, align 8 + br label %20 -cache_hit6: ; preds = %match - %cache_hit_fn7 = 
load ptr, ptr %.inlinecache2, align 8 - br label %16 +cache_hit5: ; preds = %match + %19 = load ptr, ptr %.inlinecache2, align 8 + br label %20 -16: ; preds = %cache_hit6, %cache_miss5 - %fn_phi8 = phi ptr [ %cache_hit_fn7, %cache_hit6 ], [ %15, %cache_miss5 ] - %17 = icmp eq ptr %fn_phi8, null - br i1 %17, label %missing_function9, label %match10 +20: ; preds = %cache_hit5, %cache_miss4 + %fn_phi6 = phi ptr [ %19, %cache_hit5 ], [ %18, %cache_miss4 ] + %21 = icmp eq ptr %fn_phi6, null + br i1 %21, label %missing_function7, label %match8 -missing_function9: ; preds = %16 - %18 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %18(ptr @.panic_msg, i64 41, ptr @.file, i64 16, ptr @.func, i64 4, i32 36) #2 +missing_function7: ; preds = %20 + %22 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %22(ptr @.panic_msg, i64 41, ptr @.file, i64 16, ptr @.func, i64 4, i32 36) #2 unreachable -match10: ; preds = %16 - %19 = load ptr, ptr %w, align 8 - call void %fn_phi8(ptr %19) +match8: ; preds = %20 + %23 = load ptr, ptr %w, align 8 + call void %fn_phi6(ptr %23) ret void } define i32 @main(i32 %0, ptr %1) #0 { diff --git a/test/test_suite/dynamic/inherit_macos.c3t b/test/test_suite/dynamic/inherit_macos.c3t index c9387ec35..d5cfa1709 100644 --- a/test/test_suite/dynamic/inherit_macos.c3t +++ b/test/test_suite/dynamic/inherit_macos.c3t @@ -77,65 +77,65 @@ entry: %ptradd = getelementptr inbounds i8, ptr %z, i64 8 %3 = load i64, ptr %ptradd, align 8 %4 = inttoptr i64 %3 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %5 = icmp eq ptr %4, %type - br i1 %5, label %cache_hit, label %cache_miss + %5 = load ptr, ptr %.cachedtype, align 8 + %6 = icmp eq ptr %4, %5 + br i1 %6, label %cache_hit, label %cache_miss cache_miss: ; preds = %entry - %6 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") - store ptr %6, ptr %.inlinecache, align 8 + %7 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") + store ptr %7, ptr %.inlinecache, align 8 store ptr %4, ptr 
%.cachedtype, align 8 - br label %7 + br label %9 cache_hit: ; preds = %entry - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %7 + %8 = load ptr, ptr %.inlinecache, align 8 + br label %9 -7: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %6, %cache_miss ] - %8 = icmp eq ptr %fn_phi, null - br i1 %8, label %missing_function, label %match +9: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %8, %cache_hit ], [ %7, %cache_miss ] + %10 = icmp eq ptr %fn_phi, null + br i1 %10, label %missing_function, label %match -missing_function: ; preds = %7 - %9 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %9(ptr @.panic_msg, i64 41, +missing_function: ; preds = %9 + %11 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %11(ptr @.panic_msg, i64 41, unreachable -match: ; preds = %7 - %10 = load ptr, ptr %z, align 8 - call void %fn_phi(ptr %10) - %11 = load %any, ptr %z, align 8 - store %any %11, ptr %w, align 8 +match: ; preds = %9 + %12 = load ptr, ptr %z, align 8 + call void %fn_phi(ptr %12) + %13 = load %any, ptr %z, align 8 + store %any %13, ptr %w, align 8 %ptradd1 = getelementptr inbounds i8, ptr %w, i64 8 - %12 = load i64, ptr %ptradd1, align 8 - %13 = inttoptr i64 %12 to ptr - %type4 = load ptr, ptr %.cachedtype3, align 8 - %14 = icmp eq ptr %13, %type4 - br i1 %14, label %cache_hit6, label %cache_miss5 + %14 = load i64, ptr %ptradd1, align 8 + %15 = inttoptr i64 %14 to ptr + %16 = load ptr, ptr %.cachedtype3, align 8 + %17 = icmp eq ptr %15, %16 + br i1 %17, label %cache_hit5, label %cache_miss4 -cache_miss5: ; preds = %match - %15 = call ptr @.dyn_search(ptr %13, ptr @"$sel.tesT") - store ptr %15, ptr %.inlinecache2, align 8 - store ptr %13, ptr %.cachedtype3, align 8 - br label %16 +cache_miss4: ; preds = %match + %18 = call ptr @.dyn_search(ptr %15, ptr @"$sel.tesT") + store ptr %18, ptr %.inlinecache2, align 8 + store ptr %15, ptr %.cachedtype3, align 8 + br label %20 
-cache_hit6: ; preds = %match - %cache_hit_fn7 = load ptr, ptr %.inlinecache2, align 8 - br label %16 +cache_hit5: ; preds = %match + %19 = load ptr, ptr %.inlinecache2, align 8 + br label %20 -16: ; preds = %cache_hit6, %cache_miss5 - %fn_phi8 = phi ptr [ %cache_hit_fn7, %cache_hit6 ], [ %15, %cache_miss5 ] - %17 = icmp eq ptr %fn_phi8, null - br i1 %17, label %missing_function9, label %match10 +20: ; preds = %cache_hit5, %cache_miss4 + %fn_phi6 = phi ptr [ %19, %cache_hit5 ], [ %18, %cache_miss4 ] + %21 = icmp eq ptr %fn_phi6, null + br i1 %21, label %missing_function7, label %match8 -missing_function9: ; preds = %16 - %18 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %18(ptr @.panic_msg, i64 41 +missing_function7: ; preds = %20 + %22 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %22(ptr @.panic_msg, i64 41 unreachable -match10: ; preds = %16 - %19 = load ptr, ptr %w, align 8 - call void %fn_phi8(ptr %19) +match8: ; preds = %20 + %23 = load ptr, ptr %w, align 8 + call void %fn_phi6(ptr %23) ret void } define i32 @main(i32 %0, ptr %1) #0 { diff --git a/test/test_suite/dynamic/overlapping_function_linux.c3t b/test/test_suite/dynamic/overlapping_function_linux.c3t index f3411754a..a356aa8a9 100644 --- a/test/test_suite/dynamic/overlapping_function_linux.c3t +++ b/test/test_suite/dynamic/overlapping_function_linux.c3t @@ -75,65 +75,65 @@ entry: %ptradd = getelementptr inbounds i8, ptr %z, i64 8 %3 = load i64, ptr %ptradd, align 8 %4 = inttoptr i64 %3 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %5 = icmp eq ptr %4, %type - br i1 %5, label %cache_hit, label %cache_miss + %5 = load ptr, ptr %.cachedtype, align 8 + %6 = icmp eq ptr %4, %5 + br i1 %6, label %cache_hit, label %cache_miss cache_miss: ; preds = %entry - %6 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") - store ptr %6, ptr %.inlinecache, align 8 + %7 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") + store ptr %7, ptr %.inlinecache, align 8 store ptr %4, ptr 
%.cachedtype, align 8 - br label %7 + br label %9 cache_hit: ; preds = %entry - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %7 + %8 = load ptr, ptr %.inlinecache, align 8 + br label %9 -7: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %6, %cache_miss ] - %8 = icmp eq ptr %fn_phi, null - br i1 %8, label %missing_function, label %match +9: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %8, %cache_hit ], [ %7, %cache_miss ] + %10 = icmp eq ptr %fn_phi, null + br i1 %10, label %missing_function, label %match -missing_function: ; preds = %7 - %9 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %9(ptr @.panic_msg, i64 41, ptr @.file +missing_function: ; preds = %9 + %11 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %11(ptr @.panic_msg, i64 41, ptr @.file unreachable -match: ; preds = %7 - %10 = load ptr, ptr %z, align 8 - call void %fn_phi(ptr %10) - %11 = load %any, ptr %z, align 8 - store %any %11, ptr %w, align 8 +match: ; preds = %9 + %12 = load ptr, ptr %z, align 8 + call void %fn_phi(ptr %12) + %13 = load %any, ptr %z, align 8 + store %any %13, ptr %w, align 8 %ptradd1 = getelementptr inbounds i8, ptr %w, i64 8 - %12 = load i64, ptr %ptradd1, align 8 - %13 = inttoptr i64 %12 to ptr - %type4 = load ptr, ptr %.cachedtype3, align 8 - %14 = icmp eq ptr %13, %type4 - br i1 %14, label %cache_hit6, label %cache_miss5 + %14 = load i64, ptr %ptradd1, align 8 + %15 = inttoptr i64 %14 to ptr + %16 = load ptr, ptr %.cachedtype3, align 8 + %17 = icmp eq ptr %15, %16 + br i1 %17, label %cache_hit5, label %cache_miss4 -cache_miss5: ; preds = %match - %15 = call ptr @.dyn_search(ptr %13, ptr @"$sel.tesT") - store ptr %15, ptr %.inlinecache2, align 8 - store ptr %13, ptr %.cachedtype3, align 8 - br label %16 +cache_miss4: ; preds = %match + %18 = call ptr @.dyn_search(ptr %15, ptr @"$sel.tesT") + store ptr %18, ptr %.inlinecache2, align 8 + store ptr %15, ptr %.cachedtype3, align 8 + br 
label %20 -cache_hit6: ; preds = %match - %cache_hit_fn7 = load ptr, ptr %.inlinecache2, align 8 - br label %16 +cache_hit5: ; preds = %match + %19 = load ptr, ptr %.inlinecache2, align 8 + br label %20 -16: ; preds = %cache_hit6, %cache_miss5 - %fn_phi8 = phi ptr [ %cache_hit_fn7, %cache_hit6 ], [ %15, %cache_miss5 ] - %17 = icmp eq ptr %fn_phi8, null - br i1 %17, label %missing_function9, label %match10 +20: ; preds = %cache_hit5, %cache_miss4 + %fn_phi6 = phi ptr [ %19, %cache_hit5 ], [ %18, %cache_miss4 ] + %21 = icmp eq ptr %fn_phi6, null + br i1 %21, label %missing_function7, label %match8 -missing_function9: ; preds = %16 - %18 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %18(ptr @.panic_msg, i64 41, ptr @.file +missing_function7: ; preds = %20 + %22 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %22(ptr @.panic_msg, i64 41, ptr @.file unreachable -match10: ; preds = %16 - %19 = load ptr, ptr %w, align 8 - call void %fn_phi8(ptr %19) +match8: ; preds = %20 + %23 = load ptr, ptr %w, align 8 + call void %fn_phi6(ptr %23) ret void } diff --git a/test/test_suite/dynamic/overlapping_function_macos.c3t b/test/test_suite/dynamic/overlapping_function_macos.c3t index 14d257919..c9bee8b1c 100644 --- a/test/test_suite/dynamic/overlapping_function_macos.c3t +++ b/test/test_suite/dynamic/overlapping_function_macos.c3t @@ -72,65 +72,65 @@ entry: %ptradd = getelementptr inbounds i8, ptr %z, i64 8 %3 = load i64, ptr %ptradd, align 8 %4 = inttoptr i64 %3 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %5 = icmp eq ptr %4, %type - br i1 %5, label %cache_hit, label %cache_miss + %5 = load ptr, ptr %.cachedtype, align 8 + %6 = icmp eq ptr %4, %5 + br i1 %6, label %cache_hit, label %cache_miss cache_miss: ; preds = %entry - %6 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") - store ptr %6, ptr %.inlinecache, align 8 + %7 = call ptr @.dyn_search(ptr %4, ptr @"$sel.tesT") + store ptr %7, ptr %.inlinecache, align 8 store ptr %4, ptr 
%.cachedtype, align 8 - br label %7 + br label %9 cache_hit: ; preds = %entry - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %7 + %8 = load ptr, ptr %.inlinecache, align 8 + br label %9 -7: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %6, %cache_miss ] - %8 = icmp eq ptr %fn_phi, null - br i1 %8, label %missing_function, label %match +9: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %8, %cache_hit ], [ %7, %cache_miss ] + %10 = icmp eq ptr %fn_phi, null + br i1 %10, label %missing_function, label %match -missing_function: ; preds = %7 - %9 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %9(ptr @.panic_msg, i64 41, ptr @.file +missing_function: ; preds = %9 + %11 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %11(ptr @.panic_msg, i64 41, ptr @.file unreachable -match: ; preds = %7 - %10 = load ptr, ptr %z, align 8 - call void %fn_phi(ptr %10) - %11 = load %any, ptr %z, align 8 - store %any %11, ptr %w, align 8 +match: ; preds = %9 + %12 = load ptr, ptr %z, align 8 + call void %fn_phi(ptr %12) + %13 = load %any, ptr %z, align 8 + store %any %13, ptr %w, align 8 %ptradd1 = getelementptr inbounds i8, ptr %w, i64 8 - %12 = load i64, ptr %ptradd1, align 8 - %13 = inttoptr i64 %12 to ptr - %type4 = load ptr, ptr %.cachedtype3, align 8 - %14 = icmp eq ptr %13, %type4 - br i1 %14, label %cache_hit6, label %cache_miss5 + %14 = load i64, ptr %ptradd1, align 8 + %15 = inttoptr i64 %14 to ptr + %16 = load ptr, ptr %.cachedtype3, align 8 + %17 = icmp eq ptr %15, %16 + br i1 %17, label %cache_hit5, label %cache_miss4 -cache_miss5: ; preds = %match - %15 = call ptr @.dyn_search(ptr %13, ptr @"$sel.tesT") - store ptr %15, ptr %.inlinecache2, align 8 - store ptr %13, ptr %.cachedtype3, align 8 - br label %16 +cache_miss4: ; preds = %match + %18 = call ptr @.dyn_search(ptr %15, ptr @"$sel.tesT") + store ptr %18, ptr %.inlinecache2, align 8 + store ptr %15, ptr %.cachedtype3, align 8 + br 
label %20 -cache_hit6: ; preds = %match - %cache_hit_fn7 = load ptr, ptr %.inlinecache2, align 8 - br label %16 +cache_hit5: ; preds = %match + %19 = load ptr, ptr %.inlinecache2, align 8 + br label %20 -16: ; preds = %cache_hit6, %cache_miss5 - %fn_phi8 = phi ptr [ %cache_hit_fn7, %cache_hit6 ], [ %15, %cache_miss5 ] - %17 = icmp eq ptr %fn_phi8, null - br i1 %17, label %missing_function9, label %match10 +20: ; preds = %cache_hit5, %cache_miss4 + %fn_phi6 = phi ptr [ %19, %cache_hit5 ], [ %18, %cache_miss4 ] + %21 = icmp eq ptr %fn_phi6, null + br i1 %21, label %missing_function7, label %match8 -missing_function9: ; preds = %16 - %18 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %18(ptr @.panic_msg, i64 41, ptr @.file +missing_function7: ; preds = %20 + %22 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %22(ptr @.panic_msg, i64 41, ptr @.file unreachable -match10: ; preds = %16 - %19 = load ptr, ptr %w, align 8 - call void %fn_phi8(ptr %19) +match8: ; preds = %20 + %23 = load ptr, ptr %w, align 8 + call void %fn_phi6(ptr %23) ret void } diff --git a/test/test_suite/expressions/casts/cast_vector_fail.c3 b/test/test_suite/expressions/casts/cast_vector_fail.c3 index df92120ff..03f2efbf9 100644 --- a/test/test_suite/expressions/casts/cast_vector_fail.c3 +++ b/test/test_suite/expressions/casts/cast_vector_fail.c3 @@ -1,6 +1,6 @@ module foo; import std; -fn void test(float[<4>]* x) +fn void test(float[<4>] @simd* x) {} fn void test2(float[4]* x) {} @@ -8,7 +8,7 @@ fn void test2(float[4]* x) fn int main(String[] args) { float[4] a; - float[<4>] b; + float[<4>] @simd b; test(&a); // #error: Implicitly casting test(&b); test2(&a); diff --git a/test/test_suite/functions/splat_init.c3t b/test/test_suite/functions/splat_init.c3t index 13d82a87f..64a536a52 100644 --- a/test/test_suite/functions/splat_init.c3t +++ b/test/test_suite/functions/splat_init.c3t @@ -22,7 +22,7 @@ entry: %.anon = alloca [2 x i32], align 4 %result = alloca [2 x i32], align 
4 %0 = load i32, ptr @splat.a, align 4 - store i32 %0, ptr %z, align 4 + store i32 %0, ptr %z, align 16 %ptradd = getelementptr inbounds i8, ptr %z, i64 4 %1 = call i64 @splat.test() store i64 %1, ptr %result, align 4 diff --git a/test/test_suite/functions/test_regression.c3t b/test/test_suite/functions/test_regression.c3t index f0f66118f..6ee9bb1e8 100644 --- a/test/test_suite/functions/test_regression.c3t +++ b/test/test_suite/functions/test_regression.c3t @@ -520,7 +520,7 @@ loop.exit8: ; preds = %loop.cond2 %hi = load i64, ptr %ptradd10, align 8 %42 = call i32 @test.sum_us(ptr %lo, i64 %hi) %43 = call i32 (ptr, ...) @printf(ptr @.str.17, i32 %42) - store i32 1, ptr %varargslots, align 4 + store i32 1, ptr %varargslots, align 16 %ptradd11 = getelementptr inbounds i8, ptr %varargslots, i64 4 store i32 2, ptr %ptradd11, align 4 %ptradd12 = getelementptr inbounds i8, ptr %varargslots, i64 8 diff --git a/test/test_suite/functions/test_regression_mingw.c3t b/test/test_suite/functions/test_regression_mingw.c3t index 318122b92..bf9cdd005 100644 --- a/test/test_suite/functions/test_regression_mingw.c3t +++ b/test/test_suite/functions/test_regression_mingw.c3t @@ -564,7 +564,7 @@ loop.exit8: ; preds = %loop.cond2 call void @llvm.memcpy.p0.p0.i32(ptr align 8 %indirectarg11, ptr align 8 %z, i32 16, i1 false) %42 = call i32 @test.sum_us(ptr align 8 %indirectarg11) %43 = call i32 (ptr, ...) 
@printf(ptr @.str.17, i32 %42) - store i32 1, ptr %varargslots, align 4 + store i32 1, ptr %varargslots, align 16 %ptradd12 = getelementptr inbounds i8, ptr %varargslots, i64 4 store i32 2, ptr %ptradd12, align 4 %ptradd13 = getelementptr inbounds i8, ptr %varargslots, i64 8 diff --git a/test/test_suite/generic/generic_over_fn.c3t b/test/test_suite/generic/generic_over_fn.c3t index 3f62e985f..aacd43101 100644 --- a/test/test_suite/generic/generic_over_fn.c3t +++ b/test/test_suite/generic/generic_over_fn.c3t @@ -70,27 +70,27 @@ entry: %tc = alloca %"int[]", align 8 %list = alloca %"int[]", align 8 %len = alloca i64, align 8 - store %"int[]" zeroinitializer, ptr %literal, align 8 + store %"int[]" zeroinitializer, ptr %literal, align 16 %ptradd = getelementptr inbounds i8, ptr %literal, i64 16 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal1, ptr align 4 @.__const, i32 8, i1 false) %0 = insertvalue %"int[]" undef, ptr %literal1, 0 %1 = insertvalue %"int[]" %0, i64 2, 1 - store %"int[]" %1, ptr %ptradd, align 8 + store %"int[]" %1, ptr %ptradd, align 16 %ptradd2 = getelementptr inbounds i8, ptr %literal, i64 32 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal3, ptr align 4 @.__const.1, i32 12, i1 false) %2 = insertvalue %"int[]" undef, ptr %literal3, 0 %3 = insertvalue %"int[]" %2, i64 3, 1 - store %"int[]" %3, ptr %ptradd2, align 8 + store %"int[]" %3, ptr %ptradd2, align 16 %ptradd4 = getelementptr inbounds i8, ptr %literal, i64 48 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal5, ptr align 4 @.__const.2, i32 12, i1 false) %4 = insertvalue %"int[]" undef, ptr %literal5, 0 %5 = insertvalue %"int[]" %4, i64 3, 1 - store %"int[]" %5, ptr %ptradd4, align 8 + store %"int[]" %5, ptr %ptradd4, align 16 %ptradd6 = getelementptr inbounds i8, ptr %literal, i64 64 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal7, ptr align 4 @.__const.3, i32 12, i1 false) %6 = insertvalue %"int[]" undef, ptr %literal7, 0 %7 = insertvalue %"int[]" %6, i64 3, 1 - store 
%"int[]" %7, ptr %ptradd6, align 8 + store %"int[]" %7, ptr %ptradd6, align 16 %8 = insertvalue %"int[][]" undef, ptr %literal, 0 %9 = insertvalue %"int[][]" %8, i64 5, 1 store %"int[][]" %9, ptr %tcases, align 8 @@ -140,27 +140,27 @@ entry: %tc = alloca %"int[]", align 8 %list = alloca %"int[]", align 8 %len = alloca i64, align 8 - store %"int[]" zeroinitializer, ptr %literal, align 8 + store %"int[]" zeroinitializer, ptr %literal, align 16 %ptradd = getelementptr inbounds i8, ptr %literal, i64 16 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal1, ptr align 4 @.__const.4, i32 8, i1 false) %0 = insertvalue %"int[]" undef, ptr %literal1, 0 %1 = insertvalue %"int[]" %0, i64 2, 1 - store %"int[]" %1, ptr %ptradd, align 8 + store %"int[]" %1, ptr %ptradd, align 16 %ptradd2 = getelementptr inbounds i8, ptr %literal, i64 32 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal3, ptr align 4 @.__const.5, i32 12, i1 false) %2 = insertvalue %"int[]" undef, ptr %literal3, 0 %3 = insertvalue %"int[]" %2, i64 3, 1 - store %"int[]" %3, ptr %ptradd2, align 8 + store %"int[]" %3, ptr %ptradd2, align 16 %ptradd4 = getelementptr inbounds i8, ptr %literal, i64 48 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal5, ptr align 4 @.__const.6, i32 12, i1 false) %4 = insertvalue %"int[]" undef, ptr %literal5, 0 %5 = insertvalue %"int[]" %4, i64 3, 1 - store %"int[]" %5, ptr %ptradd4, align 8 + store %"int[]" %5, ptr %ptradd4, align 16 %ptradd6 = getelementptr inbounds i8, ptr %literal, i64 64 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal7, ptr align 4 @.__const.7, i32 12, i1 false) %6 = insertvalue %"int[]" undef, ptr %literal7, 0 %7 = insertvalue %"int[]" %6, i64 3, 1 - store %"int[]" %7, ptr %ptradd6, align 8 + store %"int[]" %7, ptr %ptradd6, align 16 %8 = insertvalue %"int[][]" undef, ptr %literal, 0 %9 = insertvalue %"int[][]" %8, i64 5, 1 store %"int[][]" %9, ptr %tcases, align 8 diff --git a/test/test_suite/initializer_lists/ranges_to_dynamic.c3t 
b/test/test_suite/initializer_lists/ranges_to_dynamic.c3t index 70106209a..764f8b4b4 100644 --- a/test/test_suite/initializer_lists/ranges_to_dynamic.c3t +++ b/test/test_suite/initializer_lists/ranges_to_dynamic.c3t @@ -25,7 +25,7 @@ entry: %.anon = alloca i64, align 8 %v = alloca i32, align 4 call void @llvm.memset.p0.i64(ptr align 16 %y, i8 0, i64 40, i1 false) - store i32 4, ptr %y, align 4 + store i32 4, ptr %y, align 16 %ptradd = getelementptr inbounds i8, ptr %y, i64 4 store i32 4, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %y, i64 8 diff --git a/test/test_suite/macros/macro_typed_varargs.c3t b/test/test_suite/macros/macro_typed_varargs.c3t index 05b34eb17..d80f7db08 100644 --- a/test/test_suite/macros/macro_typed_varargs.c3t +++ b/test/test_suite/macros/macro_typed_varargs.c3t @@ -46,7 +46,7 @@ entry: %i14 = alloca %any, align 8 %varargslots16 = alloca [1 x %any], align 16 %retparam17 = alloca i64, align 8 - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const, i32 16, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 @.__const, i32 16, i1 false) %0 = insertvalue %"int[]" undef, ptr %literal, 0 %1 = insertvalue %"int[]" %0, i64 4, 1 store %"int[]" %1, ptr %x, align 8 @@ -79,22 +79,22 @@ loop.exit: ; preds = %loop.cond store i32 1, ptr %taddr, align 4 %11 = insertvalue %any undef, ptr %taddr, 0 %12 = insertvalue %any %11, i64 ptrtoint (ptr @"$ct.int" to i64), 1 - store %any %12, ptr %literal1, align 8 + store %any %12, ptr %literal1, align 16 %ptradd2 = getelementptr inbounds i8, ptr %literal1, i64 16 store i32 -1, ptr %taddr3, align 4 %13 = insertvalue %any undef, ptr %taddr3, 0 %14 = insertvalue %any %13, i64 ptrtoint (ptr @"$ct.int" to i64), 1 - store %any %14, ptr %ptradd2, align 8 + store %any %14, ptr %ptradd2, align 16 %ptradd4 = getelementptr inbounds i8, ptr %literal1, i64 32 store i32 3141, ptr %taddr5, align 4 %15 = insertvalue %any undef, ptr %taddr5, 0 %16 = insertvalue 
%any %15, i64 ptrtoint (ptr @"$ct.int" to i64), 1 - store %any %16, ptr %ptradd4, align 8 + store %any %16, ptr %ptradd4, align 16 %ptradd6 = getelementptr inbounds i8, ptr %literal1, i64 48 store i32 1000, ptr %taddr7, align 4 %17 = insertvalue %any undef, ptr %taddr7, 0 %18 = insertvalue %any %17, i64 ptrtoint (ptr @"$ct.int" to i64), 1 - store %any %18, ptr %ptradd6, align 8 + store %any %18, ptr %ptradd6, align 16 %19 = insertvalue %"any[]" undef, ptr %literal1, 0 %20 = insertvalue %"any[]" %19, i64 4, 1 store %"any[]" %20, ptr %x8, align 8 diff --git a/test/test_suite/macros/unifying_implicit_void.c3t b/test/test_suite/macros/unifying_implicit_void.c3t index 74f49c265..2defea7fa 100644 --- a/test/test_suite/macros/unifying_implicit_void.c3t +++ b/test/test_suite/macros/unifying_implicit_void.c3t @@ -40,47 +40,49 @@ entry: %0 = insertvalue %any undef, ptr %r, 0 %1 = insertvalue %any %0, i64 ptrtoint (ptr @"$ct.std.io.ByteReader" to i64), 1 store %any %1, ptr %s, align 8 + %neq = icmp ne ptr %s, null + call void @llvm.assume(i1 %neq) %ptradd = getelementptr inbounds i8, ptr %s, i64 8 %2 = load i64, ptr %ptradd, align 8 %3 = inttoptr i64 %2 to ptr - %type = load ptr, ptr %.cachedtype, align 8 - %4 = icmp eq ptr %3, %type - br i1 %4, label %cache_hit, label %cache_miss + %4 = load ptr, ptr %.cachedtype, align 8 + %5 = icmp eq ptr %3, %4 + br i1 %5, label %cache_hit, label %cache_miss cache_miss: ; preds = %entry - %5 = call ptr @.dyn_search(ptr %3, ptr @"$sel.read_byte") - store ptr %5, ptr %.inlinecache, align 8 + %6 = call ptr @.dyn_search(ptr %3, ptr @"$sel.read_byte") + store ptr %6, ptr %.inlinecache, align 8 store ptr %3, ptr %.cachedtype, align 8 - br label %6 + br label %8 cache_hit: ; preds = %entry - %cache_hit_fn = load ptr, ptr %.inlinecache, align 8 - br label %6 + %7 = load ptr, ptr %.inlinecache, align 8 + br label %8 -6: ; preds = %cache_hit, %cache_miss - %fn_phi = phi ptr [ %cache_hit_fn, %cache_hit ], [ %5, %cache_miss ] - %7 = icmp eq ptr 
%fn_phi, null - br i1 %7, label %missing_function, label %match +8: ; preds = %cache_hit, %cache_miss + %fn_phi = phi ptr [ %7, %cache_hit ], [ %6, %cache_miss ] + %9 = icmp eq ptr %fn_phi, null + br i1 %9, label %missing_function, label %match -missing_function: ; preds = %6 - %8 = load ptr, ptr @std.core.builtin.panic, align 8 - call void %8(ptr @.panic_msg, i64 46, ptr @.file, i64 25, ptr @.func, i64 4, i32 13) #4 +missing_function: ; preds = %8 + %10 = load ptr, ptr @std.core.builtin.panic, align 8 + call void %10(ptr @.panic_msg, i64 46, ptr @.file, i64 25, ptr @.func, i64 4, i32 13) #4 unreachable -match: ; preds = %6 - %9 = load ptr, ptr %s, align 8 - %10 = call i64 %fn_phi(ptr %retparam, ptr %9) - %not_err = icmp eq i64 %10, 0 - %11 = call i1 @llvm.expect.i1(i1 %not_err, i1 true) - br i1 %11, label %after_check, label %assign_optional +match: ; preds = %8 + %11 = load ptr, ptr %s, align 8 + %12 = call i64 %fn_phi(ptr %retparam, ptr %11) + %not_err = icmp eq i64 %12, 0 + %13 = call i1 @llvm.expect.i1(i1 %not_err, i1 true) + br i1 %13, label %after_check, label %assign_optional assign_optional: ; preds = %match - store i64 %10, ptr %c.f, align 8 + store i64 %12, ptr %c.f, align 8 br label %after_assign after_check: ; preds = %match - %12 = load i8, ptr %retparam, align 1 - store i8 %12, ptr %c, align 1 + %14 = load i8, ptr %retparam, align 1 + store i8 %14, ptr %c, align 1 store i64 0, ptr %c.f, align 8 br label %after_assign @@ -90,8 +92,8 @@ after_assign: ; preds = %after_check, %assig testblock: ; preds = %after_assign %optval = load i64, ptr %c.f, align 8 %not_err1 = icmp eq i64 %optval, 0 - %13 = call i1 @llvm.expect.i1(i1 %not_err1, i1 true) - br i1 %13, label %after_check3, label %assign_optional2 + %15 = call i1 @llvm.expect.i1(i1 %not_err1, i1 true) + br i1 %15, label %after_check3, label %assign_optional2 assign_optional2: ; preds = %testblock store i64 %optval, ptr %err, align 8 @@ -102,24 +104,24 @@ after_check3: ; preds = %testblock br label 
%end_block end_block: ; preds = %after_check3, %assign_optional2 - %14 = load i64, ptr %err, align 8 - %i2b = icmp ne i64 %14, 0 + %16 = load i64, ptr %err, align 8 + %i2b = icmp ne i64 %16, 0 br i1 %i2b, label %if.then, label %if.exit if.then: ; preds = %end_block - %15 = load i64, ptr %err, align 8 - store i64 %15, ptr %error_var, align 8 + %17 = load i64, ptr %err, align 8 + store i64 %17, ptr %error_var, align 8 br label %panic_block if.exit: ; preds = %end_block br label %noerr_block panic_block: ; preds = %if.then - %16 = insertvalue %any undef, ptr %error_var, 0 - %17 = insertvalue %any %16, i64 ptrtoint (ptr @"$ct.fault" to i64), 1 - store %any %17, ptr %varargslots, align 16 - %18 = insertvalue %"any[]" undef, ptr %varargslots, 0 - %"$$temp" = insertvalue %"any[]" %18, i64 1, 1 + %18 = insertvalue %any undef, ptr %error_var, 0 + %19 = insertvalue %any %18, i64 ptrtoint (ptr @"$ct.fault" to i64), 1 + store %any %19, ptr %varargslots, align 16 + %20 = insertvalue %"any[]" undef, ptr %varargslots, 0 + %"$$temp" = insertvalue %"any[]" %20, i64 1, 1 store %"any[]" %"$$temp", ptr %indirectarg, align 8 call void @std.core.builtin.panicf(ptr @.panic_msg.1, i64 36, unreachable diff --git a/test/test_suite/slices/slice_assign.c3t b/test/test_suite/slices/slice_assign.c3t index 22346fbea..195acdc47 100644 --- a/test/test_suite/slices/slice_assign.c3t +++ b/test/test_suite/slices/slice_assign.c3t @@ -20,17 +20,15 @@ fn void main() @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1 -; Function Attrs: declare void @printf(ptr, ...) 
#0 -; Function Attrs: define void @test.main() #0 { entry: %x = alloca [8 x i32], align 16 %.anon = alloca i64, align 8 %i = alloca i32, align 4 call void @llvm.memset.p0.i64(ptr align 16 %x, i8 0, i64 32, i1 false) - store i32 3, ptr %x, align 4 + store i32 3, ptr %x, align 16 %ptradd = getelementptr inbounds i8, ptr %x, i64 4 store i32 3, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %x, i64 4 @@ -45,10 +43,12 @@ entry: store i32 52, ptr %ptradd5, align 4 store i64 0, ptr %.anon, align 8 br label %loop.cond + loop.cond: ; preds = %loop.body, %entry %0 = load i64, ptr %.anon, align 8 %gt = icmp ugt i64 8, %0 br i1 %gt, label %loop.body, label %loop.exit + loop.body: ; preds = %loop.cond %1 = load i64, ptr %.anon, align 8 %ptroffset = getelementptr inbounds [4 x i8], ptr %x, i64 %1 @@ -60,22 +60,25 @@ loop.body: ; preds = %loop.cond %addnuw = add nuw i64 %4, 1 store i64 %addnuw, ptr %.anon, align 8 br label %loop.cond + loop.exit: ; preds = %loop.cond br label %cond + cond: ; preds = %assign, %loop.exit %5 = phi i64 [ 0, %loop.exit ], [ %add, %assign ] %lt = icmp slt i64 %5, 8 br i1 %lt, label %assign, label %exit + assign: ; preds = %cond %ptroffset6 = getelementptr inbounds [4 x i8], ptr %x, i64 %5 store i32 123, ptr %ptroffset6, align 4 %add = add i64 %5, 1 br label %cond + exit: ; preds = %cond ret void } -; Function Attrs: define i32 @main(i32 %0, ptr %1) #0 { entry: call void @test.main() diff --git a/test/test_suite/slices/slice_assign2.c3t b/test/test_suite/slices/slice_assign2.c3t index 7d5b96b9c..8c4f06b7d 100644 --- a/test/test_suite/slices/slice_assign2.c3t +++ b/test/test_suite/slices/slice_assign2.c3t @@ -39,7 +39,7 @@ entry: call void @llvm.memcpy.p0.p0.i32(ptr align 16 %y, ptr align 16 @.__const.1, i32 48, i1 false) %ptradd3 = getelementptr inbounds i8, ptr %y, i64 16 %0 = load %Bar, ptr %ptradd3, align 16 - store %Bar %0, ptr %y, align 8 + store %Bar %0, ptr %y, align 16 %ptradd4 = getelementptr inbounds i8, ptr %y, i64 16 store 
%Bar %0, ptr %ptradd4, align 8 %ptradd5 = getelementptr inbounds i8, ptr %y, i64 32 diff --git a/test/test_suite/slices/slice_checks.c3t b/test/test_suite/slices/slice_checks.c3t index 3380831eb..b00526b1d 100644 --- a/test/test_suite/slices/slice_checks.c3t +++ b/test/test_suite/slices/slice_checks.c3t @@ -18,7 +18,7 @@ fn void main() /* #expect: mymodule.ll - store i32 0, ptr %array, align 4 + store i32 0, ptr %array, align 16 %ptradd = getelementptr inbounds i8, ptr %array, i64 4 store i32 0, ptr %ptradd, align 4 %ptradd1 = getelementptr inbounds i8, ptr %array, i64 8 diff --git a/test/test_suite/slices/slice_to_slice_assign.c3t b/test/test_suite/slices/slice_to_slice_assign.c3t index fe5c64225..08e992e6c 100644 --- a/test/test_suite/slices/slice_to_slice_assign.c3t +++ b/test/test_suite/slices/slice_to_slice_assign.c3t @@ -83,14 +83,14 @@ entry: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal8, ptr align 4 @.__const.2, i32 4, i1 false) %26 = insertvalue %"int[]" undef, ptr %literal8, 0 %27 = insertvalue %"int[]" %26, i64 1, 1 - store %"int[]" %27, ptr %literal, align 8 + store %"int[]" %27, ptr %literal, align 16 %28 = insertvalue %"int[][]" undef, ptr %literal, 0 %29 = insertvalue %"int[][]" %28, i64 1, 1 store %"int[][]" %29, ptr %a, align 8 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal10, ptr align 4 @.__const.3, i32 4, i1 false) %30 = insertvalue %"int[]" undef, ptr %literal10, 0 %31 = insertvalue %"int[]" %30, i64 1, 1 - store %"int[]" %31, ptr %literal9, align 8 + store %"int[]" %31, ptr %literal9, align 16 %32 = insertvalue %"int[][]" undef, ptr %literal9, 0 %33 = insertvalue %"int[][]" %32, i64 1, 1 store %"int[][]" %33, ptr %b, align 8 diff --git a/test/test_suite/slices/slice_to_slice_vector_assign.c3t b/test/test_suite/slices/slice_to_slice_vector_assign.c3t index 8d6227b84..a3d9d46c8 100644 --- a/test/test_suite/slices/slice_to_slice_vector_assign.c3t +++ b/test/test_suite/slices/slice_to_slice_vector_assign.c3t @@ -84,14 +84,14 @@ 
entry: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal8, ptr align 4 @.__const, i32 4, i1 false) %26 = insertvalue %"int[]" undef, ptr %literal8, 0 %27 = insertvalue %"int[]" %26, i64 1, 1 - store %"int[]" %27, ptr %literal, align 8 + store %"int[]" %27, ptr %literal, align 16 %28 = insertvalue %"int[][]" undef, ptr %literal, 0 %29 = insertvalue %"int[][]" %28, i64 1, 1 store %"int[][]" %29, ptr %a, align 8 call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal10, ptr align 4 @.__const.2, i32 4, i1 false) %30 = insertvalue %"int[]" undef, ptr %literal10, 0 %31 = insertvalue %"int[]" %30, i64 1, 1 - store %"int[]" %31, ptr %literal9, align 8 + store %"int[]" %31, ptr %literal9, align 16 %32 = insertvalue %"int[][]" undef, ptr %literal9, 0 %33 = insertvalue %"int[][]" %32, i64 1, 1 store %"int[][]" %33, ptr %b, align 8 diff --git a/test/test_suite/slices/various_const_slicing.c3t b/test/test_suite/slices/various_const_slicing.c3t index 999005940..090d1c1ff 100644 --- a/test/test_suite/slices/various_const_slicing.c3t +++ b/test/test_suite/slices/various_const_slicing.c3t @@ -53,7 +53,7 @@ entry: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal1, ptr align 4 @.__const, i32 4, i1 false) %0 = insertvalue %"int[]" undef, ptr %literal1, 0 %1 = insertvalue %"int[]" %0, i64 1, 1 - store %"int[]" %1, ptr %literal, align 8 + store %"int[]" %1, ptr %literal, align 16 %2 = insertvalue %"int[][]" undef, ptr %literal, 0 %3 = insertvalue %"int[][]" %2, i64 1, 1 store %"int[][]" %3, ptr %a, align 8 diff --git a/test/test_suite/statements/custom_foreach_with_ref.c3t b/test/test_suite/statements/custom_foreach_with_ref.c3t index 8d6207323..c830160c4 100644 --- a/test/test_suite/statements/custom_foreach_with_ref.c3t +++ b/test/test_suite/statements/custom_foreach_with_ref.c3t @@ -111,8 +111,8 @@ define void @foo.getFields(ptr noalias sret([5 x i32]) align 4 %0) #0 { entry: %literal = alloca [5 x i32], align 16 call void (ptr, ...) 
@printf(ptr @.str) - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %literal, ptr align 16 @.__const, i32 20, i1 false) - call void @llvm.memcpy.p0.p0.i32(ptr align 4 %0, ptr align 4 %literal, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 16 %literal, ptr align 16 @.__const, i32 20, i1 false) + call void @llvm.memcpy.p0.p0.i32(ptr align 4 %0, ptr align 16 %literal, i32 20, i1 false) ret void } diff --git a/test/test_suite/vector/vector_consts.c3t b/test/test_suite/vector/vector_consts.c3t index 9e6b1cd84..930a7204c 100644 --- a/test/test_suite/vector/vector_consts.c3t +++ b/test/test_suite/vector/vector_consts.c3t @@ -10,35 +10,41 @@ fn int x(Char8 a, Char8 b) /* #expect: foo.ll +; ModuleID = 'foo' +source_filename = "foo" +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.13.0" + +%.introspect = type { i8, i64, ptr, i64, i64, i64, [0 x i64] } + +@"$ct.foo.Char8" = linkonce global %.introspect { i8 18, i64 ptrtoint (ptr @"$ct.siv8$char" to i64), ptr null, i64 8, i64 ptrtoint (ptr @"$ct.siv8$char" to i64), i64 0, [0 x i64] zeroinitializer }, align 8 +@"$ct.siv8$char" = linkonce global %.introspect { i8 17, i64 0, ptr null, i64 8, i64 ptrtoint (ptr @"$ct.char" to i64), i64 8, [0 x i64] zeroinitializer }, align 8 +@"$ct.char" = linkonce global %.introspect { i8 3, i64 0, ptr null, i64 1, i64 0, i64 0, [0 x i64] zeroinitializer }, align 8 + +; Function Attrs: nounwind uwtable define i32 @foo.x(double %0, double %1) #0 { entry: %a = alloca <8 x i8>, align 8 %b = alloca <8 x i8>, align 8 %z = alloca <8 x i8>, align 8 - %x = alloca <8 x i8>, align 1 - %y = alloca <8 x i8>, align 1 store double %0, ptr %a, align 8 store double %1, ptr %b, align 8 %2 = load <8 x i8>, ptr %a, align 8 - store <8 x i8> %2, ptr %x, align 1 %3 = load <8 x i8>, ptr %b, align 8 - store <8 x i8> %3, ptr %y, align 1 - %4 = load <8 x i8>, ptr %x, align 1 - %5 = load <8 x i8>, ptr %y, align 1 - 
%eq = icmp eq <8 x i8> %4, %5 - %6 = sext <8 x i1> %eq to <8 x i8> - store <8 x i8> %6, ptr %z, align 8 + %eq = icmp eq <8 x i8> %2, %3 + %4 = sext <8 x i1> %eq to <8 x i8> + store <8 x i8> %4, ptr %z, align 8 + %5 = load <8 x i8>, ptr %z, align 8 + %6 = trunc <8 x i8> %5 to <8 x i1> + %sext = sext <8 x i1> %6 to <8 x i8> + %and = and <8 x i8> %7 = load <8 x i8>, ptr %z, align 8 %8 = trunc <8 x i8> %7 to <8 x i1> - %sext = sext <8 x i1> %8 to <8 x i8> - %and = and <8 x i8> - %9 = load <8 x i8>, ptr %z, align 8 - %10 = trunc <8 x i8> %9 to <8 x i1> - %sext1 = sext <8 x i1> %10 to <8 x i8> - %bnot = xor <8 x i8> + %sext1 = sext <8 x i1> %8 to <8 x i8> + %bnot = xor <8 x i8> %sext1 %and2 = and <8 x i8> %bnot, %add = add <8 x i8> %and, %and2 - %11 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %add) - %zext = zext i8 %11 to i32 + %9 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %add) + %zext = zext i8 %9 to i32 ret i32 %zext } \ No newline at end of file diff --git a/test/unit/regression/vecpointer.c3 b/test/unit/regression/vecpointer.c3 index 736d9b3c7..0b7543fa7 100644 --- a/test/unit/regression/vecpointer.c3 +++ b/test/unit/regression/vecpointer.c3 @@ -3,7 +3,7 @@ module vecpointer @test; fn void pointer_npot2_size() { int[<9>][3] a; - assert((usz)&a[1] - (usz)&a[0] == 64); + assert((usz)&a[1] - (usz)&a[0] == 9 * 4); } fn void pointer_add_sub_diff() diff --git a/test/unit/stdlib/collections/list.c3 b/test/unit/stdlib/collections/list.c3 index 55ff2ed10..a48beb0e3 100644 --- a/test/unit/stdlib/collections/list.c3 +++ b/test/unit/stdlib/collections/list.c3 @@ -8,6 +8,53 @@ struct Overalign { float[<4>] x @align(128); } +alias Vec3 = int[<3>]; +fn void veclist() +{ + List{Vec3} values; + List{Vec3} new_vertices; + values.init(mem, 200); + new_vertices.init(mem); + defer new_vertices.free(); + defer values.free(); + Vec3 x = {1,2,3}; + for (int i = 0; i < 20; i++) new_vertices.push(x); + values.clear(); + values.add_all(&new_vertices); + Vec3* entries = 
new_vertices.entries; + foreach (Vec3 entry : new_vertices) + { + assert(entry == x); + } + foreach (Vec3 entry : values) + { + assert(entry == x); + } +} + +alias Vec4 = int[<4>] @simd; +fn void veclist_simd() +{ + List{Vec4} values; + List{Vec4} new_vertices; + values.init(mem, 200); + new_vertices.init(mem); + defer new_vertices.free(); + defer values.free(); + Vec4 x = {1,2,3,5}; + for (int i = 0; i < 20; i++) new_vertices.push(x); + values.clear(); + values.add_all(&new_vertices); + Vec4* entries = new_vertices.entries; + foreach (Vec4 entry : new_vertices) + { + assert(entry == x); + } + foreach (Vec4 entry : values) + { + assert(entry == x); + } +} alias OveralignList = List{Overalign}; fn void overaligned_type()