Updated attributes and some cleaning of ABI info.

This commit is contained in:
Christoffer Lerno
2021-07-17 18:02:26 +02:00
committed by Christoffer Lerno
parent 5fc937897a
commit d578b249c2
9 changed files with 133 additions and 89 deletions

View File

@@ -350,7 +350,7 @@ typedef struct
typedef struct FunctionSignature_
{
CallConvention convention : 4;
CallABI call_abi : 4;
bool variadic : 1;
bool has_default : 1;
bool failable : 1;
@@ -377,7 +377,6 @@ typedef struct
bool attr_inline : 1;
bool attr_noinline : 1;
bool attr_extname : 1;
bool attr_stdcall : 1;
};
TypeInfo *type_parent;

View File

@@ -595,7 +595,6 @@ typedef enum
{
ATTRIBUTE_INLINE,
ATTRIBUTE_NOINLINE,
ATTRIBUTE_STDCALL,
ATTRIBUTE_OPAQUE,
ATTRIBUTE_NORETURN,
ATTRIBUTE_SECTION,
@@ -603,38 +602,30 @@ typedef enum
ATTRIBUTE_WEAK,
ATTRIBUTE_ALIGN,
ATTRIBUTE_PACKED,
NUMBER_OF_ATTRIBUTES = ATTRIBUTE_PACKED + 1,
ATTRIBUTE_UNUSED,
ATTRIBUTE_USED,
ATTRIBUTE_NAKED,
ATTRIBUTE_CDECL,
ATTRIBUTE_STDCALL,
ATTRIBUTE_VECCALL,
ATTRIBUTE_REGCALL,
ATTRIBUTE_FASTCALL,
ATTRIBUTE_DEPRECATED,
NUMBER_OF_ATTRIBUTES = ATTRIBUTE_DEPRECATED + 1,
ATTRIBUTE_NONE,
} AttributeType;
typedef enum
{
CALL_CONVENTION_NORMAL = 0,
CALL_CONVENTION_VECTOR,
CALL_CONVENTION_SYSCALL,
CALL_CONVENTION_REGCALL,
CALL_CONVENTION_STD,
CALL_CONVENTION_FAST,
} CallConvention;
// Low-level call ABIs a function can be lowered with. Replaces the coarser
// CallConvention enum; values map (roughly 1:1) onto LLVMCallConv in
// llvm_call_convention_from_call().
// NOTE(review): extracted from a rendered diff — confirm the exact member
// list against the real header before relying on it.
typedef enum
{
CALL_C,                 // Default C convention; first member, so value 0 == "unset/default".
CALL_X86_STD,           // x86 stdcall (callee pops args).
CALL_X86_FAST,          // x86 fastcall.
CALL_X86_THIS,          // x86 thiscall.
CALL_X86_VECTOR,        // x86 vectorcall.
CALL_X86_PASCAL,        // x86 pascal (lowered to plain C conv in the LLVM backend).
CALL_WIN64,             // Win64 x86-64 convention.
CALL_X64_SYSV,          // System V x86-64 convention.
CALL_X86_REG,           // x86 regcall.
CALL_AAPCS,             // ARM AAPCS.
CALL_AAPCS_VFP,         // ARM AAPCS with VFP (hard-float) registers.
CALL_INTEL_OCL_BICC,    // Intel OpenCL built-in convention.
CALL_SPIR_FUNCTION,     // SPIR function convention.
CALL_OPENCL_KERNEL,     // OpenCL kernel (target-dependent lowering; TODO in backend).
CALL_PRESERVE_MOST,     // preserve_most convention.
CALL_PRESERVE_ALL,      // preserve_all convention.
CALL_AARCH64_VECTOR,    // AArch64 vector call (backend lowering still TODO).
} CallABI;
typedef enum

View File

@@ -145,13 +145,13 @@ void c_abi_func_create_win64(FunctionSignature *signature)
Regs regs = { 0, 0 };
bool is_reg_call = false;
bool is_vector_call = false;
switch (signature->convention)
switch (signature->call_abi)
{
case CALL_CONVENTION_VECTOR:
case CALL_X86_VECTOR:
regs.float_regs = 4;
is_vector_call = true;
break;
case CALL_CONVENTION_REGCALL:
case CALL_X86_REG:
regs.float_regs = 16;
is_reg_call = true;
break;
@@ -174,13 +174,13 @@ void c_abi_func_create_win64(FunctionSignature *signature)
}
// Set up parameter registers.
switch (signature->convention)
switch (signature->call_abi)
{
case CALL_CONVENTION_VECTOR:
case CALL_X86_VECTOR:
regs.float_regs = 6;
is_vector_call = true;
break;
case CALL_CONVENTION_REGCALL:
case CALL_X86_REG:
regs.float_regs = 16;
is_reg_call = true;
break;

View File

@@ -913,7 +913,7 @@ void c_abi_func_create_x64(FunctionSignature *signature)
return c_abi_func_create_win64(signature);
}
// TODO 32 bit pointers
bool is_regcall = signature->convention == CALL_CONVENTION_REGCALL;
bool is_regcall = signature->call_abi == CALL_X86_REG;
Registers available_registers = {
.int_registers = is_regcall ? 11 : 16,

View File

@@ -156,7 +156,7 @@ static bool x86_should_return_type_in_reg(Type *type)
* @param type type of the return.
* @return
*/
ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
ABIArgInfo *x86_classify_return(CallABI call, Regs *regs, Type *type)
{
// 1. Lower any type like enum etc.
type = type_lowering(type);
@@ -168,7 +168,7 @@ ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
// should be passed directly in a register.
Type *base = NULL;
unsigned elements = 0;
if (call == CALL_CONVENTION_VECTOR || call == CALL_CONVENTION_REGCALL)
if (call == CALL_X86_VECTOR || call == CALL_X86_REG)
{
// This aggregate can lower safely
if (type_is_homogenous_aggregate(type, &base, &elements)) return abi_arg_new_direct();
@@ -246,7 +246,7 @@ ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
}
static inline bool x86_should_aggregate_use_direct(CallConvention call, Regs *regs, Type *type, bool *needs_padding)
static inline bool x86_should_aggregate_use_direct(CallABI call, Regs *regs, Type *type, bool *needs_padding)
{
// On Windows, aggregates other than HFAs are never passed in registers, and
// they do not consume register slots. Homogenous floating-point aggregates
@@ -261,9 +261,9 @@ static inline bool x86_should_aggregate_use_direct(CallConvention call, Regs *re
switch (call)
{
case CALL_CONVENTION_FAST:
case CALL_CONVENTION_VECTOR:
case CALL_CONVENTION_REGCALL:
case CALL_X86_FAST:
case CALL_X86_VECTOR:
case CALL_X86_REG:
if (type_size(type) <= 4 && regs->int_regs)
{
*needs_padding = true;
@@ -367,7 +367,7 @@ static bool x86_try_use_free_regs(Regs *regs, Type *type)
* Check if a primitive should be in reg, if so, remove number of free registers.
* @return true if it should have an inreg attribute, false otherwise.
*/
static bool x86_try_put_primitive_in_reg(CallConvention call, Regs *regs, Type *type)
static bool x86_try_put_primitive_in_reg(CallABI call, Regs *regs, Type *type)
{
// 1. Try to use regs for this type,
// regardless whether we succeed or not, this will update
@@ -385,9 +385,9 @@ static bool x86_try_put_primitive_in_reg(CallConvention call, Regs *regs, Type *
// to get an inreg attribute. Investigate!
switch (call)
{
case CALL_CONVENTION_FAST:
case CALL_CONVENTION_VECTOR:
case CALL_CONVENTION_REGCALL:
case CALL_X86_FAST:
case CALL_X86_VECTOR:
case CALL_X86_REG:
if (type_size(type) > 4) return false;
return type_is_integer_kind(type) || type_is_pointer(type);
default:
@@ -473,7 +473,7 @@ static inline ABIArgInfo *x86_classify_vector(Regs *regs, Type *type)
* error type, struct, union, subarray,
* string, array, error union, complex.
*/
static inline ABIArgInfo *x86_classify_aggregate(CallConvention call, Regs *regs, Type *type)
static inline ABIArgInfo *x86_classify_aggregate(CallABI call, Regs *regs, Type *type)
{
// Only called for aggregates.
assert(type_is_abi_aggregate(type));
@@ -527,9 +527,9 @@ static inline ABIArgInfo *x86_classify_aggregate(CallConvention call, Regs *regs
// This is padded expansion
ABIArgInfo *info = abi_arg_new_expand_padded(type_int);
bool is_reg_call = call == CALL_CONVENTION_REGCALL;
bool is_vec_call = call == CALL_CONVENTION_VECTOR;
bool is_fast_call = call == CALL_CONVENTION_FAST;
bool is_reg_call = call == CALL_X86_REG;
bool is_vec_call = call == CALL_X86_VECTOR;
bool is_fast_call = call == CALL_X86_FAST;
info->expand.padding_by_reg = is_fast_call || is_reg_call || is_vec_call;
return info;
@@ -543,7 +543,7 @@ static inline ABIArgInfo *x86_classify_aggregate(CallConvention call, Regs *regs
* @param type
* @return
*/
static ABIArgInfo *x86_classify_primitives(CallConvention call, Regs *regs, Type *type)
static ABIArgInfo *x86_classify_primitives(CallABI call, Regs *regs, Type *type)
{
// f128 i128 u128 on stack.
if (type_size(type) > 8) return x86_create_indirect_result(regs, type, BY_VAL_SKIP);
@@ -566,15 +566,15 @@ static ABIArgInfo *x86_classify_primitives(CallConvention call, Regs *regs, Type
/**
* Classify an argument to an x86 function.
*/
static ABIArgInfo *x86_classify_argument(CallConvention call, Regs *regs, Type *type)
static ABIArgInfo *x86_classify_argument(CallABI call, Regs *regs, Type *type)
{
// FIXME: Set alignment on indirect arguments.
// We lower all types here first to avoid enums and typedefs.
type = type_lowering(type);
bool is_reg_call = call == CALL_CONVENTION_REGCALL;
bool is_vec_call = call == CALL_CONVENTION_VECTOR;
bool is_reg_call = call == CALL_X86_REG;
bool is_vec_call = call == CALL_X86_VECTOR;
Type *base = NULL;
unsigned elements = 0;
@@ -629,25 +629,24 @@ void c_abi_func_create_x86(FunctionSignature *signature)
// Vector: 2 / 6
// Fast: 2 / 3
Regs regs = { 0, 0 };
switch (signature->convention)
switch (signature->call_abi)
{
case CALL_CONVENTION_NORMAL:
case CALL_CONVENTION_SYSCALL:
case CALL_C:
if (platform_target.x86.is_win32_float_struct_abi)
{
regs.float_regs = 3;
}
regs.int_regs = platform_target.default_number_regs_x86;
break;
case CALL_CONVENTION_REGCALL:
case CALL_X86_REG:
regs.int_regs = 5;
regs.float_regs = 8;
break;
case CALL_CONVENTION_VECTOR:
case CALL_X86_VECTOR:
regs.int_regs = 2;
regs.float_regs = 6;
break;
case CALL_CONVENTION_FAST:
case CALL_X86_FAST:
regs.int_regs = 2;
regs.float_regs = 3;
break;
@@ -665,15 +664,15 @@ void c_abi_func_create_x86(FunctionSignature *signature)
// return type.
if (signature->failable)
{
signature->failable_abi_info = x86_classify_return(signature->convention, &regs, type_error);
signature->failable_abi_info = x86_classify_return(signature->call_abi, &regs, type_error);
if (signature->rtype->type->type_kind != TYPE_VOID)
{
signature->ret_abi_info = x86_classify_argument(signature->convention, &regs, type_get_ptr(type_lowering(signature->rtype->type)));
signature->ret_abi_info = x86_classify_argument(signature->call_abi, &regs, type_get_ptr(type_lowering(signature->rtype->type)));
}
}
else
{
signature->ret_abi_info = x86_classify_return(signature->convention, &regs, signature->rtype->type);
signature->ret_abi_info = x86_classify_return(signature->call_abi, &regs, signature->rtype->type);
}
/*
@@ -687,7 +686,7 @@ void c_abi_func_create_x86(FunctionSignature *signature)
runVectorCallFirstPass(FI, State);
*/
if (signature->convention == CALL_CONVENTION_VECTOR)
if (signature->call_abi == CALL_X86_VECTOR)
{
FATAL_ERROR("X86 vector call not supported");
}
@@ -696,7 +695,7 @@ void c_abi_func_create_x86(FunctionSignature *signature)
Decl **params = signature->params;
VECEACH(params, i)
{
params[i]->var.abi_info = x86_classify_argument(signature->convention, &regs, params[i]->type);
params[i]->var.abi_info = x86_classify_argument(signature->call_abi, &regs, params[i]->type);
}
}
}

View File

@@ -577,11 +577,14 @@ void llvm_emit_function_decl(GenContext *c, Decl *decl)
}
llvm_attribute_add(c, function, attribute_nounwind, -1);
if (decl->func_decl.attr_stdcall && (platform_target.os == OS_TYPE_WIN32))
if (decl->func_decl.function_signature.call_abi == CALL_X86_STD)
{
LLVMSetFunctionCallConv(function, LLVMX86StdcallCallConv);
LLVMSetDLLStorageClass(function, LLVMDLLImportStorageClass);
if (platform_target.os == OS_TYPE_WIN32)
{
LLVMSetDLLStorageClass(function, LLVMDLLImportStorageClass);
}
}
LLVMSetFunctionCallConv(function, llvm_call_convention_from_call(decl->func_decl.function_signature.call_abi, platform_target.arch, platform_target.os));
switch (decl->visibility)
{

View File

@@ -338,10 +338,7 @@ static inline bool call_supports_variadic(CallABI abi)
case CALL_X86_REG:
case CALL_X86_THIS:
case CALL_X86_FAST:
case CALL_X86_PASCAL:
case CALL_X86_VECTOR:
case CALL_SPIR_FUNCTION:
case CALL_OPENCL_KERNEL:
return false;
default:
return true;
@@ -349,42 +346,26 @@ static inline bool call_supports_variadic(CallABI abi)
}
}
static inline LLVMCallConv llvm_call_convention_from_call(CallABI abi)
static inline LLVMCallConv llvm_call_convention_from_call(CallABI abi, ArchType arch, OsType os)
{
switch (abi)
{
case CALL_C:
return LLVMCCallConv;
case CALL_X86_STD:
return LLVMX86StdcallCallConv;
case CALL_X86_FAST:
return LLVMX86FastcallCallConv;
case CALL_X86_PASCAL:
return LLVMCCallConv;
case CALL_X86_REG:
return LLVMX86RegCallCallConv;
case CALL_X86_THIS:
return LLVMX86ThisCallCallConv;
case CALL_X86_VECTOR:
return LLVMX86VectorCallCallConv;
case CALL_WIN64:
return LLVMWin64CallConv;
case CALL_X64_SYSV:
return LLVMX8664SysVCallConv;
case CALL_AAPCS:
return LLVMARMAAPCSCallConv;
case CALL_AAPCS_VFP:
return LLVMARMAAPCSVFPCallConv;
case CALL_INTEL_OCL_BICC:
return LLVMIntelOCLBICallConv;
case CALL_AARCH64_VECTOR:
TODO
case CALL_SPIR_FUNCTION:
return LLVMSPIRFUNCCallConv;
case CALL_OPENCL_KERNEL:
TODO // Target dependent.
case CALL_PRESERVE_ALL:
return LLVMPreserveAllCallConv;
case CALL_PRESERVE_MOST:
return LLVMPreserveMostCallConv;
default:
return LLVMCCallConv;
}

View File

@@ -694,6 +694,7 @@ static AttributeType sema_analyse_attribute(Context *context, Attr *attr, Attrib
static AttributeDomain attribute_domain[NUMBER_OF_ATTRIBUTES] = {
[ATTRIBUTE_WEAK] = ATTR_FUNC | ATTR_CONST | ATTR_VAR,
[ATTRIBUTE_EXTNAME] = ~0,
[ATTRIBUTE_DEPRECATED] = ~0,
[ATTRIBUTE_SECTION] = ATTR_FUNC | ATTR_CONST | ATTR_VAR,
[ATTRIBUTE_PACKED] = ATTR_STRUCT | ATTR_UNION | ATTR_ERROR,
[ATTRIBUTE_NORETURN] = ATTR_FUNC,
@@ -701,7 +702,14 @@ static AttributeType sema_analyse_attribute(Context *context, Attr *attr, Attrib
[ATTRIBUTE_INLINE] = ATTR_FUNC,
[ATTRIBUTE_NOINLINE] = ATTR_FUNC,
[ATTRIBUTE_OPAQUE] = ATTR_STRUCT | ATTR_UNION,
[ATTRIBUTE_STDCALL] = ATTR_FUNC
[ATTRIBUTE_USED] = ~0,
[ATTRIBUTE_UNUSED] = ~0,
[ATTRIBUTE_NAKED] = ATTR_FUNC,
[ATTRIBUTE_CDECL] = ATTR_FUNC,
[ATTRIBUTE_STDCALL] = ATTR_FUNC,
[ATTRIBUTE_VECCALL] = ATTR_FUNC,
[ATTRIBUTE_REGCALL] = ATTR_FUNC,
[ATTRIBUTE_FASTCALL] = ATTR_FUNC,
};
if ((attribute_domain[type] & domain) != domain)
@@ -711,7 +719,11 @@ static AttributeType sema_analyse_attribute(Context *context, Attr *attr, Attrib
}
switch (type)
{
case ATTRIBUTE_CDECL:
case ATTRIBUTE_FASTCALL:
case ATTRIBUTE_STDCALL:
case ATTRIBUTE_VECCALL:
case ATTRIBUTE_REGCALL:
return type;
case ATTRIBUTE_ALIGN:
if (!attr->expr)
@@ -804,6 +816,13 @@ static inline bool sema_analyse_doc_header(Ast *docs, Decl **params, Decl **extr
}
return true;
}
// Record `abi` as the function's call ABI.
// Returns true if a non-default ABI was already set, so callers can flag
// conflicting calling-convention attributes (e.g. @stdcall + @fastcall).
// Relies on CALL_C being enumerator 0: call_abi > 0 means "explicitly set".
static inline bool sema_update_call_convention(Decl *decl, CallABI abi)
{
bool had = decl->func_decl.function_signature.call_abi > 0;
decl->func_decl.function_signature.call_abi = abi;
return had;
}
static inline bool sema_analyse_func(Context *context, Decl *decl)
{
DEBUG_LOG("----Analysing function %s", decl->name);
@@ -837,6 +856,7 @@ static inline bool sema_analyse_func(Context *context, Decl *decl)
bool had = false;
#define SET_ATTR(_X) had = decl->func_decl._X; decl->func_decl._X = true; break
switch (attribute)
{
case ATTRIBUTE_EXTNAME:
@@ -852,7 +872,50 @@ static inline bool sema_analyse_func(Context *context, Decl *decl)
decl->alignment = attr->alignment;
break;
case ATTRIBUTE_NOINLINE: SET_ATTR(attr_noinline);
case ATTRIBUTE_STDCALL: SET_ATTR(attr_stdcall);
case ATTRIBUTE_STDCALL:
if (platform_target.arch == ARCH_TYPE_X86 || platform_target.arch == ARCH_TYPE_X86_64)
{
had = sema_update_call_convention(decl, CALL_X86_STD);
}
else if (platform_target.arch == ARCH_TYPE_ARM || platform_target.arch == ARCH_TYPE_ARMB)
{
had = sema_update_call_convention(decl, CALL_AAPCS);
}
break;
case ATTRIBUTE_CDECL:
had = sema_update_call_convention(decl, CALL_C);
break;
case ATTRIBUTE_VECCALL:
switch (platform_target.arch)
{
case ARCH_TYPE_X86_64:
case ARCH_TYPE_X86:
had = sema_update_call_convention(decl, CALL_X86_VECTOR);
break;
case ARCH_TYPE_ARM:
case ARCH_TYPE_ARMB:
case ARCH_TYPE_AARCH64:
case ARCH_TYPE_AARCH64_32:
case ARCH_TYPE_AARCH64_BE:
had = sema_update_call_convention(decl, CALL_AAPCS_VFP);
break;
default:
break;
}
break;
case ATTRIBUTE_FASTCALL:
if (platform_target.arch == ARCH_TYPE_X86)
{
had = sema_update_call_convention(decl, CALL_X86_FAST);
}
break;
case ATTRIBUTE_REGCALL:
had = decl->func_decl.function_signature.call_abi > 0;
if (platform_target.arch == ARCH_TYPE_X86)
{
had = sema_update_call_convention(decl, CALL_X86_REG);
}
break;
case ATTRIBUTE_INLINE: SET_ATTR(attr_inline);
case ATTRIBUTE_NORETURN: SET_ATTR(attr_noreturn);
case ATTRIBUTE_WEAK: SET_ATTR(attr_weak);

View File

@@ -135,14 +135,22 @@ void symtab_init(uint32_t capacity)
attribute_list[ATTRIBUTE_INLINE] = kw_inline;
attribute_list[ATTRIBUTE_NOINLINE] = KW_DEF("noinline");
attribute_list[ATTRIBUTE_STDCALL] = KW_DEF("stdcall");
attribute_list[ATTRIBUTE_OPAQUE] = KW_DEF("opaque");
attribute_list[ATTRIBUTE_NORETURN] = KW_DEF("noreturn");
attribute_list[ATTRIBUTE_ALIGN] = kw_align;
attribute_list[ATTRIBUTE_PACKED] = KW_DEF("packed");
attribute_list[ATTRIBUTE_SECTION] = KW_DEF("section");
attribute_list[ATTRIBUTE_EXTNAME] = KW_DEF("extname");
attribute_list[ATTRIBUTE_WEAK] = KW_DEF("weak");
attribute_list[ATTRIBUTE_OPAQUE] = KW_DEF("opaque");
attribute_list[ATTRIBUTE_ALIGN] = kw_align;
attribute_list[ATTRIBUTE_PACKED] = KW_DEF("packed");
attribute_list[ATTRIBUTE_UNUSED] = KW_DEF("unused");
attribute_list[ATTRIBUTE_USED] = KW_DEF("used");
attribute_list[ATTRIBUTE_NAKED] = KW_DEF("naked");
attribute_list[ATTRIBUTE_CDECL] = KW_DEF("cdecl");
attribute_list[ATTRIBUTE_STDCALL] = KW_DEF("stdcall");
attribute_list[ATTRIBUTE_VECCALL] = KW_DEF("veccall");
attribute_list[ATTRIBUTE_REGCALL] = KW_DEF("regcall");
attribute_list[ATTRIBUTE_FASTCALL] = KW_DEF("fastcall");
attribute_list[ATTRIBUTE_DEPRECATED] = KW_DEF("deprecated");
}
static inline SymEntry *entry_find(const char *key, uint32_t key_len, uint32_t hash)