Beginning support for variable sized arrays at end of struct.

This commit is contained in:
Christoffer Lerno
2021-05-24 17:57:00 +02:00
parent 97f7d1288e
commit b99f8d644b
9 changed files with 183 additions and 95 deletions

View File

@@ -20,6 +20,7 @@ Checks: >
-readability-named-parameter,
-readability-magic-numbers,
-readability-braces-around-statements,
-misc-no-recursion,
# Turn all the warnings from the checks above into errors.
WarningsAsErrors: "*"

View File

@@ -342,6 +342,7 @@ typedef struct FunctionSignature_
bool has_default : 1;
bool failable : 1;
bool typed_variadic : 1;
bool use_win64 : 1;
TypeInfo *rtype;
struct ABIArgInfo_ *ret_abi_info;
struct ABIArgInfo_ *failable_abi_info;
@@ -476,6 +477,7 @@ typedef struct Decl_
bool is_opaque : 1;
bool needs_additional_pad : 1;
bool is_substruct : 1;
bool has_variable_array : 1;
void *backend_ref;
const char *cname;
AlignSize alignment;
@@ -1773,7 +1775,7 @@ bool type_is_abi_aggregate(Type *type);
static inline bool type_is_any_integer(Type *type);
static inline bool type_is_builtin(TypeKind kind);
static inline bool type_is_ct(Type *type);
bool type_is_empty_union_struct(Type *type, bool allow_array);
bool type_is_empty_record(Type *type, bool allow_array);
bool type_is_empty_field(Type *type, bool allow_array);
static inline bool type_is_float(Type *type);
bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements);

View File

@@ -100,7 +100,7 @@ static bool riscv_detect_fpcc_struct_internal(Type *type, unsigned current_offse
if (type_is_structlike(type))
{
if (type_is_empty_union_struct(type, true)) return true;
if (type_is_empty_record(type, true)) return true;
// Unions aren't eligible unless they're empty (which is caught above).
if (type->type_kind == TYPE_UNION) return false;
Decl **members = type->decl->strukt.members;
@@ -165,7 +165,7 @@ static ABIArgInfo *riscv_classify_argument_type(Type *type, bool is_fixed, unsig
unsigned xlen = platform_target.riscv.xlen;
// Ignore empty structs/unions.
if (type_is_empty_union_struct(type, true)) return abi_arg_ignore();
if (type_is_empty_record(type, true)) return abi_arg_ignore();
ByteSize size = type_size(type);

View File

@@ -10,7 +10,7 @@ static ABIArgInfo *wasm_classify_argument_type(Type *type)
if (type_is_abi_aggregate(type))
{
// Ignore empty structs/unions.
if (type_is_empty_union_struct(type, true)) return abi_arg_ignore();
if (type_is_empty_record(type, true)) return abi_arg_ignore();
// Clang: Lower single-field structs to just pass a regular value. TODO: We
// could do reasonable-size multiple-field structs too, using getExpand(),
// though watch out for things like bitfields.
@@ -42,7 +42,7 @@ static ABIArgInfo *wasm_classify_return(Type *type)
if (type_is_abi_aggregate(type))
{
// Ignore empty
if (type_is_empty_union_struct(type, true)) return abi_arg_ignore();
if (type_is_empty_record(type, true)) return abi_arg_ignore();
Type *single_type = type_abi_find_single_struct_element(type);
if (single_type) return abi_arg_new_direct_coerce(abi_type_new_plain(single_type));

View File

@@ -11,6 +11,12 @@ ABIArgInfo *win64_classify(Regs *regs, Type *type, bool is_return, bool is_vecto
// Lower enums etc.
type = type_lowering(type);
// Variable array has to be passed indirectly.
if (type_is_structlike(type) && type->decl->has_variable_array)
{
return abi_arg_new_indirect_not_by_val();
}
Type *base = NULL;
unsigned elements = 0;
if ((is_vector || is_reg) && type_is_homogenous_aggregate(type, &base, &elements))

View File

@@ -126,16 +126,28 @@ ABIArgInfo *x64_indirect_result(Type *type, unsigned free_int_regs)
}
/**
* Based on X86_64ABIInfo::classifyRegCallStructTypeImpl in Clang
* @param type
* @param needed_registers
* @return
*/
ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *needed_registers)
{
assert(x64_type_is_structure(type));
// These are all passed in two registers.
if (type->type_kind == TYPE_ERR_UNION || type->type_kind == TYPE_SUBARRAY || type->type_kind == TYPE_VIRTUAL || type->type_kind == TYPE_VIRTUAL_ANY)
{
needed_registers->int_registers += 2;
return abi_arg_new_direct();
}
// Union, struct, err type handled =>
assert(type->type_kind == TYPE_STRUCT || type->type_kind == TYPE_UNION || type->type_kind == TYPE_ERRTYPE);
// Struct, err type handled =>
assert(type->type_kind == TYPE_STRUCT || type->type_kind == TYPE_ERRTYPE);
// Variable array structs are always passed by pointer.
if (type->decl->has_variable_array) return x64_indirect_return_result(type);
Decl **members = type->decl->strukt.members;
VECEACH(members, i)
@@ -145,11 +157,13 @@ ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *neede
Registers temp_needed_registers = {};
if (x64_type_is_structure(member_type))
{
// Recursively check the structure.
member_info = x64_classify_reg_call_struct_type_check(member_type, &temp_needed_registers);
}
else
{
member_info = x64_classify_argument_type(member_type, (unsigned)-1, &temp_needed_registers, NAMED);
// Pass as single argument.
member_info = x64_classify_argument_type(member_type, ~(0U), &temp_needed_registers, NAMED);
}
if (abi_arg_is_indirect(member_info))
{
@@ -159,20 +173,10 @@ ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *neede
needed_registers->sse_registers += temp_needed_registers.sse_registers;
needed_registers->int_registers += temp_needed_registers.int_registers;
}
// Check this!
// Send as direct.
return abi_arg_new_direct();
}
ABIArgInfo *x64_classify_reg_call_struct_type(Type *return_type, Registers *available_registers)
{
Registers needed_registers = {};
ABIArgInfo *info = x64_classify_reg_call_struct_type_check(return_type, &needed_registers);
if (!try_use_registers(available_registers, &needed_registers))
{
return x64_indirect_return_result(return_type);
}
return info;
}
static void x64_classify(Type *type, ByteSize offset_base, X64Class *lo_class, X64Class *hi_class, NamedArgument named);
@@ -236,6 +240,9 @@ void x64_classify_struct_union(Type *type, ByteSize offset_base, X64Class *curre
// 64 byte max.
if (size > 64) return;
// Variable sized member is passed in memory.
if (type->decl->has_variable_array) return;
// Re-classify
*current = CLASS_NO_CLASS;
bool is_union = type->type_kind == TYPE_UNION;
@@ -573,7 +580,9 @@ AbiType *x64_get_sse_type_at_offset(Type *type, unsigned ir_offset, Type *source
return abi_type_new_plain(type_double);
}
/**
* Based off X86_64ABIInfo::GetINTEGERTypeAtOffset in Clang
*/
AbiType *x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_type, unsigned source_offset)
{
type = type_flatten(type);
@@ -611,17 +620,18 @@ AbiType *x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_ty
break;
}
case TYPE_ERR_UNION:
if (offset < 16) return abi_type_new_plain(type_usize);
if (offset < 16) return abi_type_new_plain(type_ulong);
break;
case TYPE_VIRTUAL_ANY:
if (offset < 8) return abi_type_new_plain(type_typeid);
if (offset < 8) return abi_type_new_plain(type_ulong);
if (offset < 16) return abi_type_new_plain(type_voidptr);
break;
case TYPE_VIRTUAL:
// Two pointers.
if (offset < 16) return abi_type_new_plain(type_voidptr);
break;
case TYPE_SUBARRAY:
if (offset < 8) return abi_type_new_plain(type_usize);
if (offset < 8) return abi_type_new_plain(type_ulong);
if (offset < 16) return abi_type_new_plain(type_voidptr);
break;
case TYPE_ARRAY:
@@ -790,9 +800,19 @@ ABIArgInfo *x64_classify_return(Type *return_type)
return abi_arg_new_direct_coerce(result_type);
}
/**
* Based off X86_64ABIInfo::classifyArgumentType in Clang.
* It completely ignores the x87 type, which C3 does not use.
*
 * @param type the type to classify; it should already have been flattened.
* @param free_int_regs
* @param needed_registers
* @param is_named
* @return
*/
static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs, Registers *needed_registers, NamedArgument is_named)
{
assert(type == type_lowering(type));
X64Class hi_class;
X64Class lo_class;
x64_classify(type, 0, &lo_class, &hi_class, is_named);
@@ -804,6 +824,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs
AbiType *result_type = NULL;
*needed_registers = (Registers) { 0, 0 };
// Start by checking the lower class.
switch (lo_class)
{
case CLASS_NO_CLASS:
@@ -819,6 +840,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs
result_type = x64_get_int_type_at_offset(type, 0, type, 0);
if (hi_class == CLASS_NO_CLASS && abi_type_is_integer(result_type))
{
// We might need to promote it if it's too small.
if (type_is_promotable_integer(type))
{
return abi_arg_new_direct_int_ext(type);
@@ -831,6 +853,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs
break;
}
// At this point we know it's not MEMORY, since that's always handled.
AbiType *high_part = NULL;
switch (hi_class)
{
@@ -842,12 +865,12 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs
needed_registers->int_registers++;
high_part = x64_get_int_type_at_offset(type, 8, type, 8);
// Return directly into high part.
assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed");
assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff.");
break;
case CLASS_SSE:
needed_registers->sse_registers++;
high_part = x64_get_sse_type_at_offset(type, 8, type, 8);
assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed");
assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff");
break;
case CLASS_SSEUP:
assert(lo_class == CLASS_SSE && "Unexpected SSEUp classification.");
@@ -869,11 +892,6 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs
{
return abi_arg_new_direct();
}
if (type_is_integer(type->canonical) && type_is_integer(result_type->type)
&& result_type->type->canonical == type->canonical)
{
return abi_arg_new_direct();
}
}
return abi_arg_new_direct_coerce(result_type);
}
@@ -882,9 +900,9 @@ bool x64_type_is_structure(Type *type)
{
switch (type->type_kind)
{
case TYPE_ERR_UNION:
case TYPE_STRUCT:
case TYPE_ERRTYPE:
case TYPE_ERR_UNION:
case TYPE_SUBARRAY:
case TYPE_VIRTUAL_ANY:
case TYPE_VIRTUAL:
@@ -897,41 +915,54 @@ bool x64_type_is_structure(Type *type)
static ABIArgInfo *x64_classify_return_type(Type *ret_type, Registers *registers, bool is_regcall)
{
ret_type = type_lowering(ret_type);
// See if we can lower the reg call.
if (is_regcall && x64_type_is_structure(ret_type))
{
return x64_classify_reg_call_struct_type(ret_type, registers);
Registers needed_registers = {};
ABIArgInfo *info = x64_classify_reg_call_struct_type_check(ret_type, &needed_registers);
if (try_use_registers(registers, &needed_registers)) return info;
return x64_indirect_return_result(ret_type);
}
return x64_classify_return(ret_type);
}
static ABIArgInfo *x64_classify_parameter(Type *type, Registers *available_registers, bool is_regcall)
/**
* This code is based on the loop operations in X86_64ABIInfo::computeInfo in Clang
* @param type
* @param available_registers to update
* @param is_regcall true if this is a regcall
* @param named whether this is a named (non-vararg) parameter or not.
* @return the calculated ABI
*/
static ABIArgInfo *x64_classify_parameter(Type *type, Registers *available_registers, bool is_regcall, NamedArgument named)
{
// TODO check "NAMED"
NamedArgument arg = NAMED;
Registers needed_registers = {};
type = type_lowering(type);
ABIArgInfo *info;
// If this is a reg call, use the struct type check.
if (is_regcall && (type_is_structlike(type) || type->type_kind == TYPE_UNION))
if (is_regcall && x64_type_is_structure(type))
{
info = x64_classify_reg_call_struct_type_check(type, &needed_registers);
}
else
{
info = x64_classify_argument_type(type, available_registers->int_registers, &needed_registers, arg);
info = x64_classify_argument_type(type, available_registers->int_registers, &needed_registers, named);
}
if (!try_use_registers(available_registers, &needed_registers))
{
// use a register?
info = x64_indirect_result(type, available_registers->int_registers);
}
return info;
// Check if we can fit in a register, we're golden.
if (try_use_registers(available_registers, &needed_registers)) return info;
// The rest needs to be passed indirectly.
return x64_indirect_result(type, available_registers->int_registers);
}
void c_abi_func_create_x64(FunctionSignature *signature)
{
if (signature->use_win64)
{
return c_abi_func_create_win64(signature);
}
// TODO 32 bit pointers
// TODO allow override to get win64
bool is_regcall = signature->convention == CALL_CONVENTION_REGCALL;
Registers available_registers = {
@@ -948,7 +979,7 @@ void c_abi_func_create_x64(FunctionSignature *signature)
}
if (signature->rtype->type->type_kind != TYPE_VOID)
{
signature->ret_abi_info = x64_classify_parameter(type_get_ptr(type_lowering(signature->rtype->type)), &available_registers, is_regcall);
signature->ret_abi_info = x64_classify_parameter(type_get_ptr(type_lowering(signature->rtype->type)), &available_registers, is_regcall, NAMED);
}
}
else
@@ -963,6 +994,6 @@ void c_abi_func_create_x64(FunctionSignature *signature)
Decl **params = signature->params;
VECEACH(params, i)
{
params[i]->var.abi_info = x64_classify_parameter(params[i]->type, &available_registers, is_regcall);
params[i]->var.abi_info = x64_classify_parameter(params[i]->type, &available_registers, is_regcall, NAMED);
}
}

View File

@@ -152,23 +152,29 @@ static bool x86_should_return_type_in_reg(Type *type)
return true;
}
/**
* This code is based on X86_32ABIInfo::classifyReturnType in Clang.
 * @param call the calling convention used.
* @param regs registers available
* @param type type of the return.
* @return
*/
ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
{
if (type == type_void)
{
return abi_arg_ignore();
}
// 1. Lower any type like enum etc.
type = type_lowering(type);
// 2. Void is ignored
if (type == type_void) return abi_arg_ignore();
// 3. In the case of a vector or regcall, a homogenous aggregate
// should be passed directly in a register.
Type *base = NULL;
unsigned elements = 0;
if (call == CALL_CONVENTION_VECTOR || call == CALL_CONVENTION_REGCALL)
{
// Pass in the normal way.
if (type_is_homogenous_aggregate(type, &base, &elements))
{
return abi_arg_new_direct();
}
// This aggregate can lower safely
if (type_is_homogenous_aggregate(type, &base, &elements)) return abi_arg_new_direct();
}
if (type->type_kind == TYPE_VECTOR)
@@ -195,13 +201,18 @@ ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
if (type_is_abi_aggregate(type))
{
// Structs with variable arrays are always indirect.
if (type_is_structlike(type) && type->decl->has_variable_array)
{
return create_indirect_return_x86(regs);
}
// If we don't allow small structs in reg:
if (!platform_target.x86.return_small_struct_in_reg_abi && type->type_kind == TYPE_COMPLEX)
{
return create_indirect_return_x86(regs);
}
// Ignore empty struct/unions
if (type_is_empty_union_struct(type, true))
if (type_is_empty_record(type, true))
{
return abi_arg_ignore();
}
@@ -482,8 +493,13 @@ static inline ABIArgInfo *x86_classify_aggregate(CallConvention call, Regs *regs
// Only called for aggregates.
assert(type_is_abi_aggregate(type));
if (type_is_structlike(type) && type->decl->has_variable_array)
{
// TODO, check why this should not be by_val
return x86_create_indirect_result(regs, type, BY_VAL);
}
// Ignore empty unions / structs on non-win.
if (!platform_target.x86.is_win32_float_struct_abi && type_is_empty_union_struct(type, true))
if (!platform_target.x86.is_win32_float_struct_abi && type_is_empty_record(type, true))
{
return abi_arg_ignore();
}
@@ -625,6 +641,11 @@ static ABIArgInfo *x86_classify_argument(CallConvention call, Regs *regs, Type *
void c_abi_func_create_x86(FunctionSignature *signature)
{
// 1. Calculate the registers we have available
// Normal: 0 / 0 (3 on win32 struct ABI)
// Reg: 5 / 8
// Vector: 2 / 6
// Fast: 2 / 3
Regs regs = { 0, 0 };
switch (signature->convention)
{
@@ -634,7 +655,7 @@ void c_abi_func_create_x86(FunctionSignature *signature)
{
regs.float_regs = 3;
}
regs.int_regs = platform_target.default_number_regs;
regs.int_regs = platform_target.default_number_regs_x86;
break;
case CALL_CONVENTION_REGCALL:
regs.int_regs = 5;
@@ -646,16 +667,20 @@ void c_abi_func_create_x86(FunctionSignature *signature)
break;
case CALL_CONVENTION_FAST:
regs.int_regs = 2;
regs.float_regs = 3;
break;
default:
UNREACHABLE
}
// 3. Special case for MCU:
if (platform_target.x86.is_mcu_api)
{
regs.float_regs = 0;
regs.int_regs = 3;
}
// 4. Classify the return type. In the case of failable, we need to classify the failable itself as the
// return type.
if (signature->failable)
{
signature->failable_abi_info = x86_classify_return(signature->convention, &regs, type_error);

View File

@@ -251,7 +251,7 @@ typedef struct
PieGeneration pie : 3;
bool pic_required : 1;
FloatABI float_abi : 3;
unsigned default_number_regs : 8;
unsigned default_number_regs_x86 : 8;
union
{
struct

View File

@@ -265,23 +265,26 @@ bool type_is_union_struct(Type *type)
bool type_is_empty_field(Type *type, bool allow_array)
{
type = type->canonical;
type = type_flatten(type);
if (allow_array)
{
while (type->type_kind == TYPE_ARRAY)
{
if (type->array.len == 0) return true;
type = type->array.base->canonical;
type = type_flatten(type->array.base);
}
}
return type_is_union_struct(type) && type_is_empty_union_struct(type, allow_array);
return type_is_empty_record(type, allow_array);
}
bool type_is_empty_union_struct(Type *type, bool allow_array)
bool type_is_empty_record(Type *type, bool allow_array)
{
if (!type_is_union_struct(type)) return false;
Decl **members = type->decl->strukt.members;
Decl *decl = type->decl;
if (decl->has_variable_array) return false;
Decl **members = decl->strukt.members;
VECEACH(members, i)
{
if (!type_is_empty_field(members[i]->type, allow_array)) return false;
@@ -295,29 +298,36 @@ bool type_is_int128(Type *type)
return kind == TYPE_U128 || kind == TYPE_I128;
}
/**
* Based on isSingleElementStruct in Clang
*/
Type *type_abi_find_single_struct_element(Type *type)
{
if (!type_is_union_struct(type)) return NULL;
if (!type_is_structlike(type)) return NULL;
// Elements with a variable array? If so no.
if (type->decl->has_variable_array) return NULL;
Type *found = NULL;
Decl **members = type->decl->strukt.members;
VECEACH(members, i)
{
Type *field_type = type_flatten(members[i]->type);
// Ignore empty arrays
if (type_is_empty_field(members[i]->type, true)) continue;
if (type_is_empty_field(field_type, true)) continue;
// Already one field found, not single field.
if (found) return NULL;
Type *field_type = members[i]->type->canonical;
// Flatten single element arrays.
while (field_type->type_kind == TYPE_ARRAY)
{
if (field_type->array.len != 1) break;
field_type = field_type->array.base;
}
if (type_is_union_struct(field_type))
if (type_is_structlike(field_type))
{
field_type = type_abi_find_single_struct_element(field_type);
if (!field_type) return NULL;
@@ -329,22 +339,6 @@ Type *type_abi_find_single_struct_element(Type *type)
return found;
}
static bool type_is_qpx_vector(Type *type)
{
if (platform_target.abi != ABI_PPC64_SVR4 || !platform_target.ppc64.has_qpx) return false;
type = type->canonical;
if (type->type_kind != TYPE_VECTOR) return false;
if (type->vector.len == 1) return false;
switch (type->vector.base->type_kind)
{
case TYPE_F64:
return type_size(type) >= 256 / 8;
case TYPE_F32:
return type_size(type) <= 128 / 8;
default:
return false;
}
}
bool type_is_abi_aggregate(Type *type)
@@ -401,7 +395,7 @@ bool type_is_homogenous_base_type(Type *type)
case TYPE_F64:
return !platform_target.ppc64.is_softfp;
case TYPE_VECTOR:
return type_size(type) == 128 / 8 || type_is_qpx_vector(type);
return type_size(type) == 128 / 8;
default:
return false;
}
@@ -481,6 +475,7 @@ bool type_homogenous_aggregate_small_enough(Type *type, unsigned members)
case ABI_PPC64_SVR4:
if (type->type_kind == TYPE_F128 && platform_target.float128) return members <= 8;
if (type->type_kind == TYPE_VECTOR) return members <= 8;
// Use max 8 registers.
return ((type_size(type) + 7) / 8) * members <= 8;
case ABI_X64:
case ABI_WIN64:
@@ -497,17 +492,28 @@ bool type_homogenous_aggregate_small_enough(Type *type, unsigned members)
UNREACHABLE
}
/**
* Calculate whether this is a homogenous aggregate for the ABI.
 * Based on bool ABIInfo::isHomogeneousAggregate in Clang.
* @param type the (flattened) type to check.
* @param base the base type of the aggregate
* @param elements the elements found
* @return true if it is an aggregate, false otherwise.
*/
bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
{
*elements = 0;
RETRY:
switch (type->type_kind)
{
case TYPE_COMPLEX:
// Complex types are basically structs with 2 elements.
*base = type->complex;
*elements = 2;
break;
case TYPE_DISTINCT:
return type_is_homogenous_aggregate(type->decl->distinct_decl.base_type, base, elements);
type = type->decl->distinct_decl.base_type;
goto RETRY;
case TYPE_FXX:
case TYPE_POISONED:
case TYPE_IXX:
@@ -521,6 +527,8 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
case TYPE_INFERRED_ARRAY:
return false;
case TYPE_ERR_UNION:
DEBUG_LOG("Should error be passed as homogenous aggregate?");
FALLTHROUGH;
case TYPE_VIRTUAL:
case TYPE_VIRTUAL_ANY:
*base = type_iptr->canonical;
@@ -531,27 +539,35 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
*elements = 1;
return true;
case TYPE_TYPEDEF:
return type_is_homogenous_aggregate(type->canonical, base, elements);
type = type->canonical;
goto RETRY;
case TYPE_STRUCT:
case TYPE_UNION:
if (type->decl->has_variable_array) return false;
*elements = 0;
{
Decl **members = type->decl->strukt.members;
VECEACH(members, i)
{
unsigned member_mult = 1;
Type *member_type = members[i]->type->canonical;
// Flatten the type.
Type *member_type = type_lowering(members[i]->type);
// Descend into a nested array, accumulating the element multiplier.
while (member_type->type_kind == TYPE_ARRAY)
{
// If we find a zero length array, this is not allowed.
if (member_type->array.len == 0) return false;
member_mult *= member_type->array.len;
member_type = member_type->array.base;
}
unsigned member_members = 0;
if (type_is_empty_field(member_type, true)) continue;
// Skip any empty record.
if (type_is_empty_record(member_type, true)) continue;
// Check recursively if the field member is homogenous
if (!type_is_homogenous_aggregate(member_type, base, &member_members)) return false;
member_members *= member_mult;
// In the case of a union, grab the bigger set of elements.
if (type->type_kind == TYPE_UNION)
{
*elements = MAX(*elements, member_members);
@@ -562,19 +578,22 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
}
}
assert(base);
if (!*base) return false;
// Ensure no padding
if (type_size(*base) * *elements != type_size(type)) return false;
}
goto TYPECHECK;
case TYPE_ARRAY:
// Empty arrays? Not homogenous.
if (type->array.len == 0) return false;
// Check the underlying type and multiply by length.
if (!type_is_homogenous_aggregate(type->array.base, base, elements)) return false;
*elements *= type->array.len;
goto TYPECHECK;
case TYPE_ENUM:
// Lower enum to underlying type
type = type->decl->enums.type_info->type;
break;
goto RETRY;
case TYPE_BOOL:
// Lower bool to unsigned char
type = type_char;
@@ -593,21 +612,25 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
type = type_voidptr;
break;
}
// The common case:
*elements = 1;
// Is it a valid base type?
if (!type_is_homogenous_base_type(type)) return false;
// If we don't have a base type yet, set it.
if (!*base)
{
*base = type;
// Special handling of non-power-of-2 vectors
if (type->type_kind == TYPE_VECTOR)
{
// Expand to actual size.
// Widen the type with elements.
unsigned vec_elements = type_size(type) / type_size(type->vector.base);
*base = type_get_vector(type->vector.base, vec_elements);
}
}
// One is vector - other isn't => failure
if (((*base)->type_kind == TYPE_VECTOR) != (type->type_kind == TYPE_VECTOR)) return false;
// Size does not match => failure
if (type_size(*base) != type_size(type)) return false;