Moved ABI lowering from codegen to semantic analysis. Fixes for failables in the ABI and the codegen.

This commit is contained in:
Christoffer Lerno
2020-12-04 19:19:57 +01:00
parent 0b00fe6988
commit 6a5a0f2b94
22 changed files with 400 additions and 342 deletions

View File

@@ -106,7 +106,7 @@ add_executable(c3c
src/utils/vmem.c
src/utils/vmem.h
src/utils/whereami.c
src/compiler/llvm_codegen_c_abi_x86.c src/compiler/llvm_codegen_c_abi_internal.h src/compiler/llvm_codegen_c_abi_x64.c src/compiler/llvm_codegen_c_abi_win64.c src/compiler/llvm_codegen_c_abi_aarch64.c src/compiler/headers.c src/compiler/llvm_codegen_c_abi_riscv.c src/compiler/llvm_codegen_c_abi_wasm.c)
src/compiler/llvm_codegen_c_abi_x86.c src/compiler/c_abi_internal.h src/compiler/llvm_codegen_c_abi_x64.c src/compiler/llvm_codegen_c_abi_win64.c src/compiler/llvm_codegen_c_abi_aarch64.c src/compiler/headers.c src/compiler/llvm_codegen_c_abi_riscv.c src/compiler/llvm_codegen_c_abi_wasm.c)
target_compile_options(c3c PRIVATE -Wimplicit-int -Werror -Wall -Wno-unknown-pragmas -Wextra -Wno-unused-function -Wno-unused-variable -Wno-unused-parameter)

View File

@@ -1,5 +1,5 @@
module std::builtin;
/+
enum TypeKind
{
VOID,
@@ -97,3 +97,4 @@ struct TypeErrorValue
char* name;
ulong value;
}
+/

View File

@@ -20,8 +20,27 @@ extern func void* fopen(char *, char *);
extern func int _puts(char* message) @cname("puts");
extern func int printf(char* message, ...);
extern func int _putchar(char c) @cname("putchar");
public func int println(char *message)
public func int putchar(char c) @inline
{
return _putchar(c);
}
public func int print(char *message)
{
char* pointer = message;
while (*pointer != '\0')
{
if (!putchar(*pointer)) return 0;
pointer++;
}
return 1;
}
public func int println(char *message) @inline
{
return _puts(message);
}

View File

@@ -3,7 +3,7 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_internal.h"
#include "compiler_internal.h"
typedef enum
{
@@ -34,13 +34,19 @@ AbiType *abi_type_new_plain(Type *type);
AbiType *abi_type_new_int_bits(unsigned bits);
size_t abi_type_size(AbiType *type);
typedef struct
{
unsigned int_regs;
unsigned float_regs;
} Regs;
ABIArgInfo *c_abi_classify_return_type_default(Type *type);
ABIArgInfo *c_abi_classify_argument_type_default(Type *type);
void c_abi_func_create_win64(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_x64(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_win64(FunctionSignature *signature);
void c_abi_func_create_x86(FunctionSignature *signature);
void c_abi_func_create_x64(FunctionSignature *signature);
void c_abi_func_create_aarch64(FunctionSignature *signature);
void c_abi_func_create_riscv(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_riscv(FunctionSignature *signature);
void c_abi_func_create_wasm(FunctionSignature *signature);
// Implementation

View File

@@ -33,6 +33,7 @@ void compiler_init(void)
SourceLocation *loc = sourceloc_calloc();
char *token_type = toktype_calloc();
TokenData *data = tokdata_calloc();
compiler.lib_dir = find_lib_dir();
}
static void compiler_lex(BuildTarget *target)
@@ -75,8 +76,11 @@ void compiler_compile(BuildTarget *target)
{
Context **contexts = NULL;
diag_reset();
//vec_add(target->sources, strformat("%s/std/builtin.c3", compiler.lib_dir));
//vec_add(target->sources, strformat("%s/std/io.c3", compiler.lib_dir));
if (compiler.lib_dir)
{
vec_add(target->sources, strformat("%s/std/builtin.c3", compiler.lib_dir));
vec_add(target->sources, strformat("%s/std/io.c3", compiler.lib_dir));
}
VECEACH(target->sources, i)
{
bool loaded = false;

View File

@@ -443,6 +443,7 @@ typedef struct _Decl
Visibility visibility : 2;
ResolveStatus resolve_status : 2;
bool is_packed : 1;
bool is_opaque : 1;
bool needs_additional_pad : 1;
void *backend_ref;
const char *cname;
@@ -1155,6 +1156,86 @@ typedef enum
MODULE_SYMBOL_SEARCH_THIS
} ModuleSymbolSearch;
typedef enum
{
ABI_ARG_IGNORE,
ABI_ARG_DIRECT_PAIR,
ABI_ARG_DIRECT_COERCE,
ABI_ARG_EXPAND_COERCE,
ABI_ARG_INDIRECT,
ABI_ARG_EXPAND,
} ABIKind;
typedef enum
{
ABI_TYPE_PLAIN,
ABI_TYPE_INT_BITS
} AbiTypeKind;
typedef struct
{
AbiTypeKind kind : 2;
union
{
Type *type;
unsigned int_bits;
};
} AbiType;
typedef struct ABIArgInfo_
{
unsigned param_index_start : 16;
unsigned param_index_end : 16;
ABIKind kind : 6;
struct
{
bool by_reg : 1;
bool zeroext : 1;
bool signext : 1;
} attributes;
union
{
struct
{
bool padding_by_reg : 1;
Type *padding_type;
} expand;
struct
{
AbiType *lo;
AbiType *hi;
} direct_pair;
struct
{
unsigned char offset_lo;
unsigned char padding_hi;
unsigned char lo_index;
unsigned char hi_index;
unsigned char offset_hi;
bool packed : 1;
AbiType *lo;
AbiType *hi;
} coerce_expand;
struct
{
AbiType *partial_type;
};
struct
{
AbiType *type;
unsigned elements : 3;
bool prevent_flatten : 1;
} direct_coerce;
struct
{
// We may request a certain alignment of the parameters.
unsigned realignment : 16;
bool by_val : 1;
} indirect;
};
} ABIArgInfo;
extern Compiler compiler;
extern Ast *poisoned_ast;
extern Decl *poisoned_decl;

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by the GNU LGPLv3.0 license
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
static ABIArgInfo *abi_arg_new(ABIKind kind)
@@ -231,24 +231,24 @@ ABIArgInfo *abi_arg_new_expand_padded(Type *padding)
}
void c_abi_func_create(GenContext *context, FunctionSignature *signature)
void c_abi_func_create(FunctionSignature *signature)
{
switch (build_target.abi)
{
case ABI_X64:
c_abi_func_create_x64(context, signature);
c_abi_func_create_x64(signature);
break;
case ABI_X86:
c_abi_func_create_x86(context, signature);
c_abi_func_create_x86(signature);
break;
case ABI_WIN64:
c_abi_func_create_win64(context, signature);
c_abi_func_create_win64(signature);
break;
case ABI_AARCH64:
c_abi_func_create_aarch64(signature);
break;
case ABI_RISCV:
c_abi_func_create_riscv(context, signature);
c_abi_func_create_riscv(signature);
break;
case ABI_WASM:
c_abi_func_create_wasm(signature);

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
ABIArgInfo *aarch64_illegal_vector(Type *type)
{

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
static ABIArgInfo *riscv_coerce_and_expand_fpcc_struct(AbiType *field1, unsigned field1_offset, AbiType *field2, unsigned field2_offset)
@@ -32,7 +32,7 @@ static ABIArgInfo *riscv_coerce_and_expand_fpcc_struct(AbiType *field1, unsigned
return abi_arg_new_expand_coerce_pair(field1, field1_offset, field2, padding, is_packed);
}
static bool riscv_detect_fpcc_struct_internal(GenContext *c, Type *type, unsigned current_offset, AbiType **field1, unsigned *field1_offset, AbiType **field2, unsigned *field2_offset)
static bool riscv_detect_fpcc_struct_internal(Type *type, unsigned current_offset, AbiType **field1, unsigned *field1_offset, AbiType **field2, unsigned *field2_offset)
{
bool is_int = type_is_integer(type);
bool is_float = type_is_float(type);
@@ -87,8 +87,7 @@ static bool riscv_detect_fpcc_struct_internal(GenContext *c, Type *type, unsigne
unsigned element_size = type_size(element_type);
for (size_t i = 0; i < array_len; i++)
{
if (!riscv_detect_fpcc_struct_internal(c,
element_type,
if (!riscv_detect_fpcc_struct_internal(element_type,
current_offset,
field1,
field1_offset,
@@ -108,8 +107,7 @@ static bool riscv_detect_fpcc_struct_internal(GenContext *c, Type *type, unsigne
VECEACH(members, i)
{
Decl *member = members[i];
if (!riscv_detect_fpcc_struct_internal(c,
member->type,
if (!riscv_detect_fpcc_struct_internal(member->type,
current_offset + member->offset,
field1,
field1_offset,
@@ -122,14 +120,14 @@ static bool riscv_detect_fpcc_struct_internal(GenContext *c, Type *type, unsigne
return false;
}
static bool riscv_detect_fpcc_struct(GenContext *c, Type *type, AbiType **field1, unsigned *field1_offset, AbiType **field2, unsigned *field2_offset, unsigned *gprs, unsigned *fprs)
static bool riscv_detect_fpcc_struct(Type *type, AbiType **field1, unsigned *field1_offset, AbiType **field2, unsigned *field2_offset, unsigned *gprs, unsigned *fprs)
{
*field1 = NULL;
*field2 = NULL;
*gprs = 0;
*fprs = 0;
bool is_candidate = riscv_detect_fpcc_struct_internal(c, type, 0, field1, field1_offset, field2, field2_offset);
bool is_candidate = riscv_detect_fpcc_struct_internal(type, 0, field1, field1_offset, field2, field2_offset);
// Not really a candidate if we have a single int but no float.
if (*field1 && !*field2 && !abi_type_is_float(*field1)) return false;
@@ -159,7 +157,7 @@ static bool riscv_detect_fpcc_struct(GenContext *c, Type *type, AbiType **field1
return true;
}
static ABIArgInfo *riscv_classify_argument_type(GenContext *c, Type *type, bool is_fixed, unsigned *gprs, unsigned *fprs)
static ABIArgInfo *riscv_classify_argument_type(Type *type, bool is_fixed, unsigned *gprs, unsigned *fprs)
{
assert(type == type->canonical);
@@ -199,8 +197,7 @@ static ABIArgInfo *riscv_classify_argument_type(GenContext *c, Type *type, bool
unsigned offset2 = 0;
unsigned needed_gprs;
unsigned needed_fprs;
bool is_candidate = riscv_detect_fpcc_struct(c,
type,
bool is_candidate = riscv_detect_fpcc_struct(type,
&field1,
&offset1,
&field2,
@@ -273,7 +270,7 @@ static ABIArgInfo *riscv_classify_argument_type(GenContext *c, Type *type, bool
return abi_arg_new_indirect_not_by_val();
}
static ABIArgInfo *riscv_classify_return(GenContext *c, Type *return_type)
static ABIArgInfo *riscv_classify_return(Type *return_type)
{
if (return_type->type_kind == TYPE_VOID) return abi_arg_ignore();
@@ -282,10 +279,10 @@ static ABIArgInfo *riscv_classify_return(GenContext *c, Type *return_type)
// The rules for return and argument types are the same, so defer to
// classifyArgumentType.
return riscv_classify_argument_type(c, return_type, true, &arg_gpr_left, &arg_fpr_left);
return riscv_classify_argument_type(return_type, true, &arg_gpr_left, &arg_fpr_left);
}
void c_abi_func_create_riscv(GenContext *context, FunctionSignature *signature)
void c_abi_func_create_riscv(FunctionSignature *signature)
{
// Registers
unsigned gpr = 8;
@@ -293,7 +290,7 @@ void c_abi_func_create_riscv(GenContext *context, FunctionSignature *signature)
Type *return_type = signature->failable ? type_error : signature->rtype->type;
return_type = type_lowering(return_type);
ABIArgInfo *return_abi = riscv_classify_return(context, return_type);
ABIArgInfo *return_abi = riscv_classify_return(return_type);
if (signature->failable)
{
signature->failable_abi_info = return_abi;
@@ -332,7 +329,7 @@ void c_abi_func_create_riscv(GenContext *context, FunctionSignature *signature)
// If we have a failable, then the return type is a parameter.
if (signature->failable && signature->rtype->type->type_kind != TYPE_VOID)
{
signature->ret_abi_info = riscv_classify_argument_type(context, type_get_ptr(type_lowering(signature->rtype->type)),
signature->ret_abi_info = riscv_classify_argument_type(type_get_ptr(type_lowering(signature->rtype->type)),
true, &arg_gprs_left, &arg_fprs_left);
}
@@ -340,6 +337,6 @@ void c_abi_func_create_riscv(GenContext *context, FunctionSignature *signature)
VECEACH(params, i)
{
bool is_fixed = true;
params[i]->var.abi_info = riscv_classify_argument_type(context, type_lowering(params[i]->type), is_fixed, &arg_gprs_left, &arg_fprs_left);
params[i]->var.abi_info = riscv_classify_argument_type(type_lowering(params[i]->type), is_fixed, &arg_gprs_left, &arg_fprs_left);
}
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
static ABIArgInfo *wasm_classify_argument_type(Type *type)
{

View File

@@ -2,9 +2,9 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
ABIArgInfo *win64_classify(GenContext *context, Type *type, bool is_return, bool is_vector, bool is_reg)
ABIArgInfo *win64_classify(Regs *regs, Type *type, bool is_return, bool is_vector, bool is_reg)
{
if (type->type_kind == TYPE_VOID) return abi_arg_ignore();
@@ -18,9 +18,9 @@ ABIArgInfo *win64_classify(GenContext *context, Type *type, bool is_return, bool
if (is_reg)
{
// Enough registers? Then use direct/expand
if (context->abi.sse_registers >= elements)
if (regs->float_regs >= elements)
{
context->abi.sse_registers -= elements;
regs->float_regs -= elements;
// Direct if return / builtin / vector
if (is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR)
{
@@ -34,10 +34,10 @@ ABIArgInfo *win64_classify(GenContext *context, Type *type, bool is_return, bool
if (is_vector)
{
// Enough registers AND return / builtin / vector
if (context->abi.sse_registers >= elements &&
if (regs->float_regs >= elements &&
(is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR))
{
context->abi.sse_registers -= elements;
regs->float_regs -= elements;
return abi_arg_new_direct();
}
// HVAs are handled later.
@@ -82,7 +82,7 @@ ABIArgInfo *win64_classify(GenContext *context, Type *type, bool is_return, bool
return abi_arg_new_direct();
}
ABIArgInfo *win64_reclassify_hva_arg(GenContext *context, Type *type, ABIArgInfo *info)
ABIArgInfo *win64_reclassify_hva_arg(Regs *regs, Type *type, ABIArgInfo *info)
{
// Assumes vectorCall calling convention.
Type *base = NULL;
@@ -90,9 +90,9 @@ ABIArgInfo *win64_reclassify_hva_arg(GenContext *context, Type *type, ABIArgInfo
type = type_lowering(type);
if (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_VECTOR && type_is_homogenous_aggregate(type, &base, &elements))
{
if (context->abi.sse_registers >= elements)
if (regs->float_regs >= elements)
{
context->abi.sse_registers -= elements;
regs->float_regs -= elements;
ABIArgInfo *new_info = abi_arg_new_direct();
new_info->attributes.by_reg = true;
return new_info;
@@ -101,7 +101,7 @@ ABIArgInfo *win64_reclassify_hva_arg(GenContext *context, Type *type, ABIArgInfo
return info;
}
void win64_vector_call_args(GenContext *context, FunctionSignature *signature, bool is_vector, bool is_reg)
void win64_vector_call_args(Regs *regs, FunctionSignature *signature, bool is_vector, bool is_reg)
{
static const unsigned max_param_vector_calls_as_reg = 6;
unsigned count = 0;
@@ -111,85 +111,85 @@ void win64_vector_call_args(GenContext *context, FunctionSignature *signature, b
Decl *param = params[i];
if (count < max_param_vector_calls_as_reg)
{
param->var.abi_info = win64_classify(context, param->type, false, is_vector, is_reg);
param->var.abi_info = win64_classify(regs, param->type, false, is_vector, is_reg);
}
else
{
// Cannot be passed in registers, so pretend there are no registers.
unsigned regs = context->abi.sse_registers;
context->abi.sse_registers = 0;
param->var.abi_info = win64_classify(context, param->type, false, is_vector, is_reg);
context->abi.sse_registers = regs;
unsigned float_regs = regs->float_regs;
regs->float_regs = 0;
param->var.abi_info = win64_classify(regs, param->type, false, is_vector, is_reg);
regs->float_regs = float_regs;
}
count++;
}
VECEACH(params, i)
{
Decl *param = params[i];
param->var.abi_info = win64_reclassify_hva_arg(context, param->type, param->var.abi_info);
param->var.abi_info = win64_reclassify_hva_arg(regs, param->type, param->var.abi_info);
}
}
void c_abi_func_create_win64(GenContext *context, FunctionSignature *signature)
void c_abi_func_create_win64(FunctionSignature *signature)
{
// allow calling sysv?
// Set up return registers.
context->abi.int_registers = 0;
Regs regs = { 0, 0 };
bool is_reg_call = false;
bool is_vector_call = false;
switch (context->abi.call_convention)
switch (signature->convention)
{
case CALL_CONVENTION_VECTOR:
context->abi.sse_registers = 4;
regs.float_regs = 4;
is_vector_call = true;
break;
case CALL_CONVENTION_REGCALL:
context->abi.sse_registers = 16;
regs.float_regs = 16;
is_reg_call = true;
break;
default:
context->abi.sse_registers = 0;
regs.float_regs = 0;
break;
}
if (signature->failable)
{
signature->failable_abi_info = win64_classify(context, type_error, true, is_vector_call, is_reg_call);
signature->failable_abi_info = win64_classify(&regs, type_error, true, is_vector_call, is_reg_call);
if (signature->rtype->type->type_kind != TYPE_VOID)
{
signature->ret_abi_info = win64_classify(context, type_get_ptr(type_lowering(signature->rtype->type)), false, is_vector_call, is_reg_call);
signature->ret_abi_info = win64_classify(&regs, type_get_ptr(type_lowering(signature->rtype->type)), false, is_vector_call, is_reg_call);
}
}
else
{
signature->ret_abi_info = win64_classify(context, signature->rtype->type, true, is_vector_call, is_reg_call);
signature->ret_abi_info = win64_classify(&regs, signature->rtype->type, true, is_vector_call, is_reg_call);
}
// Set up parameter registers.
switch (context->abi.call_convention)
switch (signature->convention)
{
case CALL_CONVENTION_VECTOR:
context->abi.sse_registers = 6;
regs.float_regs = 6;
is_vector_call = true;
break;
case CALL_CONVENTION_REGCALL:
context->abi.sse_registers = 16;
regs.float_regs = 16;
is_reg_call = true;
break;
default:
context->abi.sse_registers = 0;
regs.float_regs = 0;
break;
}
if (is_vector_call)
{
win64_vector_call_args(context, signature, is_vector_call, is_reg_call);
win64_vector_call_args(&regs, signature, is_vector_call, is_reg_call);
return;
}
Decl **params = signature->params;
VECEACH(params, i)
{
params[i]->var.abi_info = win64_classify(context, params[i]->type, false, is_vector_call, is_reg_call);
params[i]->var.abi_info = win64_classify(&regs, params[i]->type, false, is_vector_call, is_reg_call);
}
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
typedef enum
{
@@ -914,12 +914,11 @@ static ABIArgInfo *x64_classify_parameter(Type *type, Registers *available_regis
return info;
}
void c_abi_func_create_x64(GenContext *context, FunctionSignature *signature)
void c_abi_func_create_x64(FunctionSignature *signature)
{
// TODO 32 bit pointers
// TODO allow override to get win64
bool is_regcall = signature->convention == CALL_CONVENTION_REGCALL;
context->abi.call_convention = signature->convention;
Registers available_registers = {
.int_registers = is_regcall ? 11 : 16,

View File

@@ -2,11 +2,11 @@
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#include "c_abi_internal.h"
#define MIN_ABI_STACK_ALIGN 4
static bool x86_try_use_free_regs(GenContext *context, Type *type);
static bool x86_try_use_free_regs(Regs *regs, Type *type);
static inline bool type_is_simd_vector(Type *type)
{
@@ -46,15 +46,15 @@ static unsigned x86_stack_alignment(Type *type, unsigned alignment)
}
static ABIArgInfo *x86_create_indirect_result(GenContext *context, Type *type, ByVal by_val)
static ABIArgInfo *x86_create_indirect_result(Regs *regs, Type *type, ByVal by_val)
{
if (by_val != BY_VAL)
{
ABIArgInfo *info = abi_arg_new_indirect_not_by_val();
if (context->abi.int_registers)
if (regs->int_regs)
{
context->abi.int_registers--;
regs->int_regs--;
if (!build_target.x86.is_mcu_api) info->attributes.by_reg = true;
}
return info;
@@ -79,12 +79,12 @@ static ABIArgInfo *x86_create_indirect_result(GenContext *context, Type *type, B
}
ABIArgInfo *create_indirect_return_x86(GenContext *context)
ABIArgInfo *create_indirect_return_x86(Regs *regs)
{
ABIArgInfo *info = abi_arg_new_indirect_not_by_val();
if (!context->abi.int_registers) return info;
if (!regs->int_regs) return info;
// Consume a register for the return.
context->abi.int_registers--;
regs->int_regs--;
if (build_target.x86.is_mcu_api) return info;
return abi_arg_by_reg_attr(info);
@@ -148,7 +148,7 @@ static bool x86_should_return_type_in_reg(Type *type)
return true;
}
ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
ABIArgInfo *x86_classify_return(CallConvention call, Regs *regs, Type *type)
{
if (type == type_void)
{
@@ -158,7 +158,7 @@ ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
type = type_lowering(type);
Type *base = NULL;
unsigned elements = 0;
if (context->abi.call_convention == CALL_CONVENTION_VECTOR || context->abi.call_convention == CALL_CONVENTION_REGCALL)
if (call == CALL_CONVENTION_VECTOR || call == CALL_CONVENTION_REGCALL)
{
// Pass in the normal way.
if (type_is_homogenous_aggregate(type, &base, &elements))
@@ -184,7 +184,7 @@ ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
{
return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
}
return create_indirect_return_x86(context);
return create_indirect_return_x86(regs);
}
return abi_arg_new_direct();
}
@@ -194,7 +194,7 @@ ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
// If we don't allow small structs in reg:
if (!build_target.x86.return_small_struct_in_reg_abi && type->type_kind == TYPE_COMPLEX)
{
return create_indirect_return_x86(context);
return create_indirect_return_x86(regs);
}
// Ignore empty struct/unions
if (type_is_empty_union_struct(type, true))
@@ -222,7 +222,7 @@ ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
// This is not a single element struct, so we wrap it in an int.
return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
}
return create_indirect_return_x86(context);
return create_indirect_return_x86(regs);
}
// Is this small enough to need to be extended?
@@ -232,14 +232,14 @@ ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
}
// If we support something like int128, then this is an indirect return.
if (type_is_integer(type) && type_size(type) > 8) return create_indirect_return_x86(context);
if (type_is_integer(type) && type_size(type) > 8) return create_indirect_return_x86(regs);
// Otherwise we expect to just pass this nicely in the return.
return abi_arg_new_direct();
}
static inline bool x86_should_aggregate_use_direct(GenContext *context, Type *type, bool *needs_padding)
static inline bool x86_should_aggregate_use_direct(CallConvention call, Regs *regs, Type *type, bool *needs_padding)
{
// On Windows, aggregates other than HFAs are never passed in registers, and
// they do not consume register slots. Homogenous floating-point aggregates
@@ -248,16 +248,16 @@ static inline bool x86_should_aggregate_use_direct(GenContext *context, Type *ty
*needs_padding = false;
if (!x86_try_use_free_regs(context, type)) return false;
if (!x86_try_use_free_regs(regs, type)) return false;
if (build_target.x86.is_mcu_api) return true;
switch (context->abi.call_convention)
switch (call)
{
case CALL_CONVENTION_FAST:
case CALL_CONVENTION_VECTOR:
case CALL_CONVENTION_REGCALL:
if (type_size(type) <= 4 && context->abi.int_registers)
if (type_size(type) <= 4 && regs->int_regs)
{
*needs_padding = true;
}
@@ -319,7 +319,7 @@ static inline bool x86_can_expand_indirect_aggregate_arg(Type *type)
return size == type_size(type);
}
static bool x86_try_use_free_regs(GenContext *context, Type *type)
static bool x86_try_use_free_regs(Regs *regs, Type *type)
{
// 1. Floats are not passed in regs on soft floats.
if (!build_target.x86.use_soft_float && type_is_float(type)) return false;
@@ -339,26 +339,26 @@ static bool x86_try_use_free_regs(GenContext *context, Type *type)
if (build_target.x86.is_mcu_api)
{
// 4a. Just return if there are not enough registers.
if (size_in_regs > context->abi.int_registers) return false;
if (size_in_regs > regs->int_regs) return false;
// 4b. If the size in regs > 2 then refuse.
if (size_in_regs > 2) return false;
// 4c. Use registers, we're fine.
context->abi.int_registers -= size_in_regs;
regs->int_regs -= size_in_regs;
return true;
}
// 5. The non-MCU ABI, if we don't have enough registers,
// clear them to prevent register use later on.
if (size_in_regs > context->abi.int_registers)
if (size_in_regs > regs->int_regs)
{
context->abi.int_registers = 0;
regs->int_regs = 0;
return false;
}
// 6. Use registers, we're fine.
context->abi.int_registers -= size_in_regs;
regs->int_regs -= size_in_regs;
return true;
}
@@ -367,12 +367,12 @@ static bool x86_try_use_free_regs(GenContext *context, Type *type)
* Check if a primitive should be in reg, if so, remove number of free registers.
* @return true if it should have an inreg attribute, false otherwise.
*/
static bool x86_try_put_primitive_in_reg(GenContext *context, Type *type)
static bool x86_try_put_primitive_in_reg(CallConvention call, Regs *regs, Type *type)
{
// 1. Try to use regs for this type,
// regardless whether we succeed or not, this will update
// the number of registers available.
if (!x86_try_use_free_regs(context, type)) return false;
if (!x86_try_use_free_regs(regs, type)) return false;
// 2. On MCU, do not use the inreg attribute.
if (build_target.x86.is_mcu_api) return false;
@@ -383,7 +383,7 @@ static bool x86_try_put_primitive_in_reg(GenContext *context, Type *type)
// Some questions here though if we use 3 registers on these
// we don't mark it as inreg, however a later register may use a reg.
// to get an inreg attribute. Investigate!
switch (context->abi.call_convention)
switch (call)
{
case CALL_CONVENTION_FAST:
case CALL_CONVENTION_VECTOR:
@@ -398,7 +398,7 @@ static bool x86_try_put_primitive_in_reg(GenContext *context, Type *type)
/**
* Handle the vector/regcalls with HVAs.
*/
static inline ABIArgInfo *x86_classify_homogenous_aggregate(GenContext *context, Type *type, unsigned elements, bool is_vec_call)
static inline ABIArgInfo *x86_classify_homogenous_aggregate(Regs *regs, Type *type, unsigned elements, bool is_vec_call)
{
// We now know it's a float/double or a vector,
// since only those are valid for x86
@@ -406,13 +406,13 @@ static inline ABIArgInfo *x86_classify_homogenous_aggregate(GenContext *context,
// If we don't have enough SSE registers,
// just send this by pointer.
if (context->abi.sse_registers < elements)
if (regs->float_regs < elements)
{
return x86_create_indirect_result(context, type, BY_VAL_SKIP);
return x86_create_indirect_result(regs, type, BY_VAL_SKIP);
}
// Use the SSE registers.
context->abi.sse_registers -= elements;
regs->float_regs -= elements;
// In case of a vector call, pass HVA directly and
// don't flatten.
@@ -433,7 +433,7 @@ static inline ABIArgInfo *x86_classify_homogenous_aggregate(GenContext *context,
return abi_arg_new_expand();
}
static inline ABIArgInfo *x86_classify_vector(GenContext *context, Type *type)
static inline ABIArgInfo *x86_classify_vector(Regs *regs, Type *type)
{
unsigned size = type_size(type);
@@ -442,12 +442,12 @@ static inline ABIArgInfo *x86_classify_vector(GenContext *context, Type *type)
// user-defined vector types larger than 512 bits indirectly for simplicity.
if (build_target.x86.is_win32_float_struct_abi)
{
if (size < 64 && context->abi.sse_registers)
if (size < 64 && regs->float_regs)
{
context->abi.sse_registers--;
regs->float_regs--;
return abi_arg_by_reg_attr(abi_arg_new_direct());
}
return x86_create_indirect_result(context, type, BY_VAL_SKIP);
return x86_create_indirect_result(regs, type, BY_VAL_SKIP);
}
// On Darwin, some vectors are passed in memory, we handle this by passing
// it as an i8/i16/i32/i64.
@@ -473,7 +473,7 @@ static inline ABIArgInfo *x86_classify_vector(GenContext *context, Type *type)
* error type, struct, union, subarray,
* string, array, error union, complex.
*/
static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type)
static inline ABIArgInfo *x86_classify_aggregate(CallConvention call, Regs *regs, Type *type)
{
// Only called for aggregates.
assert(type_is_abi_aggregate(type));
@@ -491,12 +491,12 @@ static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type
// added in MSVC 2015.
if (build_target.x86.is_win32_float_struct_abi && type_abi_alignment(type) > 4)
{
return x86_create_indirect_result(context, type, BY_VAL_SKIP);
return x86_create_indirect_result(regs, type, BY_VAL_SKIP);
}
// See if we can pass aggregates directly.
// this never happens for MSVC
if (x86_should_aggregate_use_direct(context, type, &needs_padding_in_reg))
if (x86_should_aggregate_use_direct(call, regs, type, &needs_padding_in_reg))
{
// Here we coerce the aggregate into a struct { i32, i32, ... }
// but we do not generate this struct immediately here.
@@ -514,7 +514,7 @@ static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type
// optimizations.
// Don't do this for the MCU if there are still free integer registers
// (see X86_64 ABI for full explanation).
if (size <= 16 && (!build_target.x86.is_mcu_api || !context->abi.int_registers) &&
if (size <= 16 && (!build_target.x86.is_mcu_api || !regs->int_regs) &&
x86_can_expand_indirect_aggregate_arg(type))
{
if (!needs_padding_in_reg) return abi_arg_new_expand();
@@ -522,14 +522,14 @@ static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type
// This is padded expansion
ABIArgInfo *info = abi_arg_new_expand_padded(type_int);
bool is_reg_call = context->abi.call_convention == CALL_CONVENTION_REGCALL;
bool is_vec_call = context->abi.call_convention == CALL_CONVENTION_VECTOR;
bool is_fast_call = context->abi.call_convention == CALL_CONVENTION_FAST;
bool is_reg_call = call == CALL_CONVENTION_REGCALL;
bool is_vec_call = call == CALL_CONVENTION_VECTOR;
bool is_fast_call = call == CALL_CONVENTION_FAST;
info->expand.padding_by_reg = is_fast_call || is_reg_call || is_vec_call;
return info;
}
return x86_create_indirect_result(context, type, BY_VAL);
return x86_create_indirect_result(regs, type, BY_VAL);
}
/**
@@ -538,12 +538,12 @@ static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type
* @param type
* @return
*/
static ABIArgInfo *x86_classify_primitives(GenContext *context, Type *type)
static ABIArgInfo *x86_classify_primitives(CallConvention call, Regs *regs, Type *type)
{
// f128 i128 u128 on stack.
if (type_size(type) > 8) return x86_create_indirect_result(context, type, BY_VAL_SKIP);
if (type_size(type) > 8) return x86_create_indirect_result(regs, type, BY_VAL_SKIP);
bool in_reg = x86_try_put_primitive_in_reg(context, type);
bool in_reg = x86_try_put_primitive_in_reg(call, regs, type);
if (type_is_promotable_integer(type))
{
@@ -561,15 +561,15 @@ static ABIArgInfo *x86_classify_primitives(GenContext *context, Type *type)
/**
* Classify an argument to an x86 function.
*/
static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
static ABIArgInfo *x86_classify_argument(CallConvention call, Regs *regs, Type *type)
{
// FIXME: Set alignment on indirect arguments.
// We lower all types here first to avoid enums and typedefs.
type = type_lowering(type);
bool is_reg_call = context->abi.call_convention == CALL_CONVENTION_REGCALL;
bool is_vec_call = context->abi.call_convention == CALL_CONVENTION_VECTOR;
bool is_reg_call = call == CALL_CONVENTION_REGCALL;
bool is_vec_call = call == CALL_CONVENTION_VECTOR;
Type *base = NULL;
unsigned elements = 0;
@@ -578,7 +578,7 @@ static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
if ((is_vec_call || is_reg_call)
&& type_is_homogenous_aggregate(type, &base, &elements))
{
return x86_classify_homogenous_aggregate(context, type, elements, is_vec_call);
return x86_classify_homogenous_aggregate(regs, type, elements, is_vec_call);
}
@@ -596,9 +596,9 @@ static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
case TYPE_BOOL:
case TYPE_VARARRAY:
case TYPE_POINTER:
return x86_classify_primitives(context, type);
return x86_classify_primitives(call, regs, type);
case TYPE_VECTOR:
return x86_classify_vector(context, type);
return x86_classify_vector(regs, type);
case TYPE_ERRTYPE:
case TYPE_STRUCT:
case TYPE_UNION:
@@ -607,7 +607,7 @@ static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
case TYPE_ARRAY:
case TYPE_ERR_UNION:
case TYPE_COMPLEX:
return x86_classify_aggregate(context, type);
return x86_classify_aggregate(call, regs, type);
case TYPE_TYPEINFO:
case TYPE_MEMBER:
UNREACHABLE
@@ -615,51 +615,50 @@ static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
UNREACHABLE
}
void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature)
void c_abi_func_create_x86(FunctionSignature *signature)
{
context->abi.call_convention = signature->convention;
context->abi.sse_registers = 0;
Regs regs = { 0, 0 };
switch (signature->convention)
{
case CALL_CONVENTION_NORMAL:
case CALL_CONVENTION_SYSCALL:
if (build_target.x86.is_win32_float_struct_abi)
{
context->abi.sse_registers = 3;
regs.float_regs = 3;
}
context->abi.int_registers = build_target.default_number_regs;
regs.int_regs = build_target.default_number_regs;
break;
case CALL_CONVENTION_REGCALL:
context->abi.int_registers = 5;
context->abi.sse_registers = 8;
regs.int_regs = 5;
regs.float_regs = 8;
break;
case CALL_CONVENTION_VECTOR:
context->abi.int_registers = 2;
context->abi.sse_registers = 6;
regs.int_regs = 2;
regs.float_regs = 6;
break;
case CALL_CONVENTION_FAST:
context->abi.int_registers = 2;
regs.int_regs = 2;
break;
default:
UNREACHABLE
}
if (build_target.x86.is_mcu_api)
{
context->abi.sse_registers = 0;
context->abi.int_registers = 3;
regs.float_regs = 0;
regs.int_regs = 3;
}
if (signature->failable)
{
signature->failable_abi_info = x86_classify_return(context, type_error);
signature->failable_abi_info = x86_classify_return(signature->convention, &regs, type_error);
if (signature->rtype->type->type_kind != TYPE_VOID)
{
signature->ret_abi_info = x86_classify_argument(context, type_get_ptr(type_lowering(signature->rtype->type)));
signature->ret_abi_info = x86_classify_argument(signature->convention, &regs, type_get_ptr(type_lowering(signature->rtype->type)));
}
}
else
{
signature->ret_abi_info = x86_classify_return(context, signature->rtype->type);
signature->ret_abi_info = x86_classify_return(signature->convention, &regs, signature->rtype->type);
}
/*
@@ -673,7 +672,7 @@ void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature)
runVectorCallFirstPass(FI, State);
*/
if (context->abi.call_convention == CALL_CONVENTION_VECTOR)
if (signature->convention == CALL_CONVENTION_VECTOR)
{
FATAL_ERROR("X86 vector call not supported");
}
@@ -682,7 +681,7 @@ void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature)
Decl **params = signature->params;
VECEACH(params, i)
{
params[i]->var.abi_info = x86_classify_argument(context, params[i]->type);
params[i]->var.abi_info = x86_classify_argument(signature->convention, &regs, params[i]->type);
}
}
}

View File

@@ -2188,6 +2188,117 @@ void gencontext_emit_call_intrinsic_expr(GenContext *c, BEValue *be_value, Expr
UNREACHABLE
}
/**
 * Lower a single call argument according to its ABI classification and append
 * the resulting LLVM value(s) to *args.
 *
 * @param context the codegen context.
 * @param args pointer to the argument vector to append to (vec_add may grow it).
 * @param info the ABI classification computed for this parameter.
 * @param be_value the evaluated argument (value or address form).
 * @param type the type of the argument.
 */
void llvm_emit_parameter(GenContext *context, LLVMValueRef **args, ABIArgInfo *info, BEValue *be_value, Type *type)
{
	switch (info->kind)
	{
		case ABI_ARG_IGNORE:
			// Nothing is passed for this argument.
			return;
		case ABI_ARG_INDIRECT:
		{
			// Pass by pointer: copy the value into a fresh alloca and pass its address.
			// If we want we could optimize for structs by doing it by reference here.
			unsigned alignment = info->indirect.realignment ?: type_abi_alignment(type);
			LLVMValueRef indirect = llvm_emit_alloca(context,
			                                         llvm_get_type(context, type),
			                                         alignment,
			                                         "indirectarg");
			llvm_store_bevalue_aligned(context, indirect, be_value, alignment);
			vec_add(*args, indirect);
			return;
		}
		case ABI_ARG_DIRECT_COERCE:
		{
			LLVMTypeRef coerce_type = llvm_get_coerce_type(context, info);
			// No coercion needed? Then pass the rvalue directly.
			if (!coerce_type || coerce_type == llvm_get_type(context, type))
			{
				vec_add(*args, llvm_value_rvalue_store(context, be_value));
				return;
			}
			// Coerce into a single value of the target type.
			if (!abi_info_should_flatten(info))
			{
				vec_add(*args, llvm_emit_coerce(context, coerce_type, be_value, type));
				return;
			}
			// Flattening: pass each element of the coerced struct as its own argument.
			LLVMValueRef cast;
			unsigned target_alignment = llvm_abi_alignment(coerce_type);
			unsigned max_align = MAX(((unsigned)be_value->alignment), llvm_abi_alignment(coerce_type));
			// If we are loading something with greater alignment than what we have, we cannot directly memcpy.
			if (llvm_value_is_addr(be_value) && be_value->alignment < target_alignment)
			{
				// So load it instead.
				llvm_value_rvalue(context, be_value);
			}
			// In this case we have something nicely aligned, so we just do a cast.
			if (llvm_value_is_addr(be_value))
			{
				cast = LLVMBuildBitCast(context->builder, be_value->value, LLVMPointerType(coerce_type, 0), "");
			}
			else
			{
				// Spill the rvalue into a sufficiently aligned temporary.
				cast = llvm_emit_alloca(context, coerce_type, max_align, "coerce");
				LLVMValueRef target = LLVMBuildBitCast(context->builder, cast, llvm_get_ptr_type(context, type), "");
				llvm_store_bevalue_aligned(context, target, be_value, max_align);
			}
			LLVMTypeRef element = llvm_abi_type(context, info->direct_coerce.type);
			for (unsigned idx = 0; idx < info->direct_coerce.elements; idx++)
			{
				LLVMValueRef element_ptr = LLVMBuildStructGEP2(context->builder, coerce_type, cast, idx, "");
				vec_add(*args,
				        llvm_emit_load_aligned(context, element, element_ptr, llvm_abi_alignment(element), ""));
			}
			return;
		}
		case ABI_ARG_DIRECT_PAIR:
		{
			llvm_value_addr(context, be_value);
			// TODO: handle the case where be_value's alignment is lower than the
			// pair's ABI alignment (was a stray debug printf left in the code).
			// Here we do the following transform:
			// struct -> { lo, hi } -> lo, hi
			LLVMTypeRef lo = llvm_abi_type(context, info->direct_pair.lo);
			LLVMTypeRef hi = llvm_abi_type(context, info->direct_pair.hi);
			LLVMTypeRef struct_type = llvm_get_coerce_type(context, info);
			LLVMValueRef cast = LLVMBuildBitCast(context->builder, be_value->value, llvm_get_ptr_type(context, type), "casttemp");
			// Get the lo value.
			LLVMValueRef lo_ptr = LLVMBuildStructGEP2(context->builder, struct_type, cast, 0, "lo");
			vec_add(*args, llvm_emit_load_aligned(context, lo, lo_ptr, llvm_abi_alignment(lo), "lo"));
			// Get the hi value.
			LLVMValueRef hi_ptr = LLVMBuildStructGEP2(context->builder, struct_type, cast, 1, "hi");
			vec_add(*args, llvm_emit_load_aligned(context, hi, hi_ptr, llvm_abi_alignment(hi), "hi"));
			return;
		}
		case ABI_ARG_EXPAND_COERCE:
		{
			// Move this to an address (if needed)
			llvm_value_addr(context, be_value);
			LLVMTypeRef coerce_type = llvm_get_coerce_type(context, info);
			LLVMValueRef temp = LLVMBuildBitCast(context->builder, be_value->value, LLVMPointerType(coerce_type, 0), "coerce");
			// Pass the lo element, then the hi element if one is present.
			LLVMValueRef gep_first = LLVMBuildStructGEP2(context->builder, coerce_type, temp, info->coerce_expand.lo_index, "first");
			vec_add(*args, LLVMBuildLoad2(context->builder, llvm_abi_type(context, info->coerce_expand.lo), gep_first, ""));
			if (info->coerce_expand.hi)
			{
				LLVMValueRef gep_second = LLVMBuildStructGEP2(context->builder, coerce_type, temp, info->coerce_expand.hi_index, "second");
				vec_add(*args, LLVMBuildLoad2(context->builder, llvm_abi_type(context, info->coerce_expand.hi), gep_second, ""));
			}
			return;
		}
		case ABI_ARG_EXPAND:
		{
			// Move this to an address (if needed)
			llvm_value_addr(context, be_value);
			llvm_expand_type_to_args(context, type, be_value->value, args);
			// Expand the padding here.
			if (info->expand.padding_type)
			{
				vec_add(*args, LLVMGetUndef(llvm_get_type(context, info->expand.padding_type)));
			}
			return;
		}
	}
}
void gencontext_emit_call_expr(GenContext *context, BEValue *be_value, Expr *expr)
{
printf("Optimize call return\n");
@@ -2225,9 +2336,16 @@ void gencontext_emit_call_expr(GenContext *context, BEValue *be_value, Expr *exp
LLVMValueRef return_param = NULL;
LLVMValueRef *values = NULL;
ABIArgInfo *ret_info = signature->ret_abi_info;
Type *return_type = signature->rtype->type->canonical;
if (signature->failable)
{
ret_info = signature->failable_abi_info;
return_type = type_error;
}
switch (ret_info->kind)
{
case ABI_ARG_INDIRECT:
@@ -2249,7 +2367,14 @@ void gencontext_emit_call_expr(GenContext *context, BEValue *be_value, Expr *exp
case ABI_ARG_EXPAND_COERCE:
break;
}
unsigned param_index = 0;
if (signature->failable && signature->ret_abi_info)
{
Type *actual_return_type = type_lowering(signature->rtype->type);
return_param = llvm_emit_alloca(context, llvm_get_type(context, actual_return_type), 0, "retparam");
llvm_value_set(be_value, return_param, type_get_ptr(actual_return_type));
llvm_emit_parameter(context, &values, signature->ret_abi_info, be_value, be_value->type);
}
unsigned arguments = vec_size(expr->call_expr.arguments);
assert(arguments >= vec_size(signature->params));
VECEACH(signature->params, i)
@@ -2258,113 +2383,7 @@ void gencontext_emit_call_expr(GenContext *context, BEValue *be_value, Expr *exp
llvm_emit_expr(context, be_value, arg_expr);
Decl *param = signature->params[i];
ABIArgInfo *info = param->var.abi_info;
switch (info->kind)
{
case ABI_ARG_IGNORE:
// Skip.
break;
case ABI_ARG_INDIRECT:
{
// If we want we could optimize for structs by doing it by reference here.
unsigned alignment = info->indirect.realignment ?: type_abi_alignment(param->type);
LLVMValueRef indirect = llvm_emit_alloca(context,
llvm_get_type(context, param->type),
alignment,
"indirectarg");
llvm_store_bevalue_aligned(context, indirect, be_value, alignment);
vec_add(values, indirect);
break;
}
case ABI_ARG_DIRECT_COERCE:
{
LLVMTypeRef coerce_type = llvm_get_coerce_type(context, info);
if (!coerce_type || coerce_type == llvm_get_type(context, param->type))
{
vec_add(values, llvm_value_rvalue_store(context, be_value));
break;
}
if (!abi_info_should_flatten(info))
{
vec_add(values, llvm_emit_coerce(context, coerce_type, be_value, param->type));
break;
}
LLVMValueRef cast;
unsigned target_alignment = llvm_abi_alignment(coerce_type);
unsigned max_align = MAX(((unsigned)be_value->alignment), llvm_abi_alignment(coerce_type));
// If we are loading something with greater alignment than what we have, we cannot directly memcpy.
if (llvm_value_is_addr(be_value) && be_value->alignment < target_alignment)
{
// So load it instead.
llvm_value_rvalue(context, be_value);
}
// In this case we have something nicely aligned, so we just do a cast.
if (llvm_value_is_addr(be_value))
{
cast = LLVMBuildBitCast(context->builder, be_value->value, LLVMPointerType(coerce_type, 0), "");
}
else
{
cast = llvm_emit_alloca(context, coerce_type, max_align, "coerce");
LLVMValueRef target = LLVMBuildBitCast(context->builder, cast, llvm_get_ptr_type(context, param->type), "");
llvm_store_bevalue_aligned(context, target, be_value, max_align);
}
LLVMTypeRef element = llvm_abi_type(context, info->direct_coerce.type);
for (unsigned idx = 0; idx < info->direct_coerce.elements; idx++)
{
LLVMValueRef element_ptr = LLVMBuildStructGEP2(context->builder, coerce_type, cast, idx, "");
vec_add(values,
llvm_emit_load_aligned(context, element, element_ptr, llvm_abi_alignment(element), ""));
}
break;
}
case ABI_ARG_DIRECT_PAIR:
{
llvm_value_addr(context, be_value);
printf("Handle invalid alignment");
// Here we do the following transform:
// struct -> { lo, hi } -> lo, hi
LLVMTypeRef lo = llvm_abi_type(context, info->direct_pair.lo);
LLVMTypeRef hi = llvm_abi_type(context, info->direct_pair.hi);
LLVMTypeRef struct_type = llvm_get_coerce_type(context, info);
LLVMValueRef cast = LLVMBuildBitCast(context->builder, be_value->value, llvm_get_ptr_type(context, param->type), "casttemp");
// Get the lo value.
LLVMValueRef lo_ptr = LLVMBuildStructGEP2(context->builder, struct_type, cast, 0, "lo");
vec_add(values, llvm_emit_load_aligned(context, lo, lo_ptr, llvm_abi_alignment(lo), "lo"));
// Get the hi value.
LLVMValueRef hi_ptr = LLVMBuildStructGEP2(context->builder, struct_type, cast, 1, "hi");
vec_add(values, llvm_emit_load_aligned(context, hi, hi_ptr, llvm_abi_alignment(hi), "hi"));
break;
}
case ABI_ARG_EXPAND_COERCE:
{
// Move this to an address (if needed)
llvm_value_addr(context, be_value);
LLVMTypeRef coerce_type = llvm_get_coerce_type(context, info);
LLVMValueRef temp = LLVMBuildBitCast(context->builder, be_value->value, LLVMPointerType(coerce_type, 0), "coerce");
LLVMValueRef gep_first = LLVMBuildStructGEP2(context->builder, coerce_type, temp, info->coerce_expand.lo_index, "first");
vec_add(values, LLVMBuildLoad2(context->builder, llvm_abi_type(context, info->coerce_expand.lo), gep_first, ""));
if (info->coerce_expand.hi)
{
LLVMValueRef gep_second = LLVMBuildStructGEP2(context->builder, coerce_type, temp, info->coerce_expand.hi_index, "second");
vec_add(values, LLVMBuildLoad2(context->builder, llvm_abi_type(context, info->coerce_expand.hi), gep_second, ""));
}
break;
}
case ABI_ARG_EXPAND:
{
// Move this to an address (if needed)
llvm_value_addr(context, be_value);
llvm_expand_type_to_args(context, param->type, be_value->value, &values);
// Expand the padding here.
if (info->expand.padding_type)
{
vec_add(values, LLVMGetUndef(llvm_get_type(context, info->expand.padding_type)));
}
break;
}
}
llvm_emit_parameter(context, &values, info, be_value, param->type);
}
for (unsigned i = vec_size(signature->params); i < arguments; i++)
{
@@ -2473,16 +2492,14 @@ void gencontext_emit_call_expr(GenContext *context, BEValue *be_value, Expr *exp
llvm_emit_cond_br(context, &no_err, after_block, context->catch_block);
}
llvm_emit_block(context, after_block);
// If void, be_value contents should be skipped.
if (!signature->ret_abi_info) return;
llvm_value_set_address(be_value, return_param, expr->type);
return;
}
//gencontext_emit_throw_branch(context, call, signature->throws, expr->call_expr.throw_info, signature->error_return);
/*
if (function->func.function_signature.convention)
{
LLVMSetFunctionCallConv(call, LLVMX86StdcallCallConv);
}*/
llvm_value_set(be_value, call, expr->type);
}

View File

@@ -252,6 +252,8 @@ void llvm_emit_return_abi(GenContext *c, BEValue *return_value, BEValue *failabl
LLVMValueRef return_out = c->return_out;
Type *return_type = signature->rtype->type;
BEValue no_fail;
// In this case we use the failable as the actual return.
if (signature->failable)
{
@@ -261,6 +263,11 @@ void llvm_emit_return_abi(GenContext *c, BEValue *return_value, BEValue *failabl
}
return_out = c->failable_out;
return_type = type_error;
if (!failable)
{
llvm_value_set(&no_fail, LLVMConstNull(llvm_get_type(c, type_error)), type_error);
failable = &no_fail;
}
return_value = failable;
info = signature->failable_abi_info;
}
@@ -406,6 +413,10 @@ void llvm_emit_function_body(GenContext *context, Decl *decl)
else
{
context->return_out = NULL;
if (signature->ret_abi_info && signature->failable)
{
context->return_out = LLVMGetParam(context->function, arg++);
}
}

View File

@@ -45,85 +45,9 @@ typedef struct
LLVMBasicBlockRef next_block;
} BreakContinue;
// How an argument or return value is lowered at the C ABI level.
typedef enum
{
ABI_ARG_IGNORE,        // Nothing is passed for this argument.
ABI_ARG_DIRECT_PAIR,   // Passed directly as two values: { lo, hi }.
ABI_ARG_DIRECT_COERCE, // Passed directly, possibly coerced to a different LLVM type.
ABI_ARG_EXPAND_COERCE, // Coerced into a struct, from which lo (and optionally hi) fields are passed.
ABI_ARG_INDIRECT,      // Passed indirectly through a pointer (possibly byval / realigned).
ABI_ARG_EXPAND,        // Aggregate expanded into its constituent fields, plus optional padding.
} ABIKind;
// Discriminator for AbiType: a real language type or a synthesized integer.
typedef enum
{
ABI_TYPE_PLAIN,    // The union holds a Type*.
ABI_TYPE_INT_BITS  // The union holds a bit width for a raw integer.
} AbiTypeKind;
// A type as seen by ABI lowering: either an actual Type* or an
// integer of `int_bits` bits synthesized for register passing.
typedef struct
{
AbiTypeKind kind : 2;
union
{
Type *type;        // Valid when kind == ABI_TYPE_PLAIN.
unsigned int_bits; // Valid when kind == ABI_TYPE_INT_BITS.
};
} AbiType;
// Result of ABI classification for one parameter or return value.
// The active union member is selected by `kind` (see ABIKind).
typedef struct ABIArgInfo_
{
// Range of lowered parameter slots this argument occupies.
unsigned param_index_start : 16;
unsigned param_index_end : 16;
ABIKind kind : 6;
struct
{
bool by_reg : 1;   // Argument is assigned to a register class.
bool zeroext : 1;  // Zero-extend when promoting (integers).
bool signext : 1;  // Sign-extend when promoting (integers).
} attributes;
union
{
// ABI_ARG_EXPAND: optional padding argument emitted after the expansion.
struct
{
bool padding_by_reg : 1;
Type *padding_type;
} expand;
// ABI_ARG_DIRECT_PAIR: the two halves passed as separate values.
struct
{
AbiType *lo;
AbiType *hi;
} direct_pair;
// ABI_ARG_EXPAND_COERCE: layout of the coerced struct; lo_index/hi_index
// are the field indices to load, offsets/padding describe the packing.
struct
{
unsigned char offset_lo;
unsigned char padding_hi;
unsigned char lo_index;
unsigned char hi_index;
unsigned char offset_hi;
bool packed : 1;
AbiType *lo;
AbiType *hi;
} coerce_expand;
struct
{
AbiType *partial_type;
};
// ABI_ARG_DIRECT_COERCE: target type; when elements > 1 and flattening
// is not prevented, each element is passed as its own argument.
struct
{
AbiType *type;
unsigned elements : 3;
bool prevent_flatten : 1;
} direct_coerce;
// ABI_ARG_INDIRECT: pass through a pointer.
struct
{
// We may request a certain alignment of the parameters.
unsigned realignment : 16;
bool by_val : 1;
} indirect;
};
} ABIArgInfo;
typedef struct
@@ -171,14 +95,6 @@ typedef struct
bool current_block_is_target : 1;
bool did_call_stack_save : 1;
LLVMTypeRef type_data_definitions[TYPE_KINDS];
struct
{
unsigned int_registers;
unsigned sse_registers;
unsigned simd_registers;
int args;
CallConvention call_convention;
} abi;
} GenContext;
// LLVM Intrinsics
@@ -331,7 +247,6 @@ void llvm_debug_scope_pop(GenContext *context);
void llvm_debug_push_lexical_scope(GenContext *context, SourceSpan location);
LLVMMetadataRef llvm_debug_current_scope(GenContext *context);
void c_abi_func_create(GenContext *context, FunctionSignature *signature);
bool llvm_emit_check_block_branch(GenContext *context);

View File

@@ -949,6 +949,7 @@ void gencontext_emit_catch_stmt(GenContext *c, Ast *ast)
// Emit the catch, which will create jumps like we want them.
BEValue value;
llvm_emit_expr(c, &value, catch_expr);
llvm_value_fold_failable(c, &value);
// Restore.
POP_ERROR();

View File

@@ -257,8 +257,6 @@ LLVMTypeRef llvm_func_type(GenContext *context, Type *type)
LLVMTypeRef *params = NULL;
FunctionSignature *signature = type->func.signature;
c_abi_func_create(context, signature);
LLVMTypeRef return_type = NULL;
Type *real_return_type = signature->failable ? type_error : signature->rtype->type->canonical;

View File

@@ -254,6 +254,10 @@ static bool sema_analyse_struct_union(Context *context, Decl *decl)
had = decl->is_packed;
decl->is_packed = true;
break;
case ATTRIBUTE_OPAQUE:
had = decl->is_opaque;
decl->is_opaque = true;
break;
default:
UNREACHABLE
}
@@ -379,6 +383,7 @@ static inline Type *sema_analyse_function_signature(Context *context, FunctionSi
TokenType type = TOKEN_INVALID_TOKEN;
signature->mangled_signature = symtab_add(buffer, buffer_write_offset, fnv1a(buffer, buffer_write_offset), &type);
Type *func_type = stable_get(&context->local_symbols, signature->mangled_signature);
c_abi_func_create(signature);
if (!func_type)
{
func_type = type_new(TYPE_FUNC, signature->mangled_signature);
@@ -564,6 +569,7 @@ static AttributeType sema_analyse_attribute(Context *context, Attr *attr, Attrib
[ATTRIBUTE_NORETURN] = ATTR_FUNC,
[ATTRIBUTE_ALIGN] = ATTR_FUNC | ATTR_CONST | ATTR_VAR | ATTR_STRUCT | ATTR_UNION,
[ATTRIBUTE_INLINE] = ATTR_FUNC,
[ATTRIBUTE_NOINLINE] = ATTR_FUNC,
[ATTRIBUTE_OPAQUE] = ATTR_STRUCT | ATTR_UNION,
[ATTRIBUTE_STDCALL] = ATTR_FUNC
};

View File

@@ -26,3 +26,5 @@ static inline void context_push_scope(Context *context)
#define POP_NEXT() POP_X(next); context->next_switch = _old_next_switch
#define PUSH_BREAKCONT(ast) PUSH_CONTINUE(ast); PUSH_BREAK(ast)
#define POP_BREAKCONT() POP_CONTINUE(); POP_BREAK()
void c_abi_func_create(FunctionSignature *signature);

View File

@@ -612,6 +612,7 @@ unsigned int type_abi_alignment(Type *type)
case TYPE_UNION:
return type->decl->alignment;
case TYPE_TYPEID:
return type_abi_alignment(type_usize);
case TYPE_BOOL:
case ALL_INTS:
case ALL_FLOATS:

View File

@@ -118,7 +118,8 @@ const char* find_lib_dir(void)
return lib_path;
}
error_exit("Could not find the standard library /lib/std/");
DEBUG_LOG("Could not find the standard library /lib/std/");
return NULL;
}
void path_get_dir_and_filename_from_full(const char *full_path, char **filename, char **dir_path)