Add macro arguments &foo $foo #foo.

C ABI compatibility for aarch64, win64, x86 and x64
Added debug info
This commit is contained in:
Christoffer Lerno
2020-10-19 20:58:40 +02:00
committed by Christoffer Lerno
parent 4222f2731e
commit 3c15e495dd
52 changed files with 7936 additions and 2113 deletions

View File

@@ -57,6 +57,13 @@ static void usage(void)
OUTPUT(" -Os - Optimize for size.");
OUTPUT(" -O3 - Aggressive optimization.");
OUTPUT(" --emit-llvm - Emit LLVM IR as a .ll file per module.");
OUTPUT("");
OUTPUT(" -freg-struct-return - Override default ABI to return small structs in registers.");
OUTPUT(" -fpcc-struct-return - Override default ABI to return small structs on the stack.");
OUTPUT(" -fno-memcpy-pass - Prevents compiler from doing a mem copy pass (for debug).");
OUTPUT("");
OUTPUT(" -msoft-float - Use software floating point.");
OUTPUT(" -mno-soft-float - Prevent use of software floating point.");
}
@@ -204,6 +211,37 @@ static void parse_option()
{
case 'h':
break;
case 'f':
if (match_shortopt("freg-struct-return"))
{
build_options.feature.reg_struct_return = true;
return;
}
if (match_shortopt("fpcc-struct-return"))
{
build_options.feature.stack_struct_return = true;
return;
}
if (match_shortopt("fno-memcpy-pass"))
{
build_options.feature.no_memcpy_pass = true;
return;
}
FAIL_WITH_ERR("Unknown argument -%s.", &current_arg[1]);
case 'm':
	// Machine options: -msoft-float / -mno-soft-float.
	if (match_shortopt("msoft-float"))
	{
		build_options.feature.soft_float = true;
		build_options.feature.no_soft_float = false;
		return;
	}
	if (match_shortopt("mno-soft-float"))
	{
		// Fix: this branch previously set soft_float = true and
		// no_soft_float = false — byte-identical to -msoft-float,
		// so -mno-soft-float could never disable soft float.
		build_options.feature.soft_float = false;
		build_options.feature.no_soft_float = true;
		return;
	}
	// Unknown -m option: report and abort (FAIL_WITH_ERR does not return).
	FAIL_WITH_ERR("Cannot process the unknown command \"%s\".", current_arg);
case 'O':
if (build_options.optimization_level != OPTIMIZATION_NOT_SET)
{
@@ -323,7 +361,7 @@ void parse_arguments(int argc, const char *argv[])
build_options.emit_bitcode = true;
build_options.optimization_level = OPTIMIZATION_NOT_SET;
build_options.size_optimization_level = SIZE_OPTIMIZATION_NOT_SET;
build_options.debug_info = DEBUG_INFO_NONE;
build_options.debug_info = DEBUG_INFO_FULL;
build_options.debug_mode = false;
build_options.command = COMMAND_MISSING;
build_options.symtab_size = DEFAULT_SYMTAB_SIZE;

View File

@@ -103,6 +103,15 @@ typedef struct
const char* path;
const char* cpu;
const char* target_triple;
struct
{
bool reg_struct_return : 1;
bool stack_struct_return : 1;
bool no_memcpy_pass : 1;
bool soft_float : 1;
bool no_soft_float : 1;
} feature;
unsigned version;
CompilerCommand command;
uint32_t symtab_size;
CompileOption compile_option;

View File

@@ -46,6 +46,11 @@ static TypeInfo poison_type_info = { .kind = TYPE_INFO_POISON };
Type *poisoned_type = &poison_type;
TypeInfo *poisoned_type_info = &poison_type_info;
/**
 * ABI alignment for a declaration: an explicitly set alignment wins,
 * otherwise fall back to the ABI alignment of the declaration's type.
 */
unsigned decl_abi_alignment(Decl *decl)
{
	if (decl->alignment) return decl->alignment;
	return type_abi_alignment(decl->type);
}
void decl_set_external_name(Decl *decl)
{
if (decl->visibility == VISIBLE_EXTERN)
@@ -322,6 +327,12 @@ void fprint_type_recursive(Context *context, FILE *file, Type *type, int indent)
switch (type->type_kind)
{
case TYPE_COMPLEX:
DUMP("(type complex");
return;
case TYPE_VECTOR:
DUMP("(type vector");
return;
case TYPE_TYPEINFO:
DUMP("(type typeinfo)");
return;
@@ -371,16 +382,9 @@ void fprint_type_recursive(Context *context, FILE *file, Type *type, int indent)
DUMPEND();
case TYPE_VOID:
case TYPE_BOOL:
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
case TYPE_F32:
case TYPE_F64:
case ALL_SIGNED_INTS:
case ALL_UNSIGNED_INTS:
case ALL_REAL_FLOATS:
DUMPF("(%s)", type->name);
return;
case TYPE_IXX:
@@ -545,6 +549,10 @@ void fprint_expr_recursive(Context *context, FILE *file, Expr *expr, int indent)
DUMPF("(ctident %s", expr->ct_ident_expr.identifier);
DUMPEXPC(expr);
DUMPEND();
case EXPR_HASH_IDENT:
DUMPF("(hashident %s", expr->hash_ident_expr.identifier);
DUMPEXPC(expr);
DUMPEND();
case EXPR_MACRO_CT_IDENTIFIER:
DUMPF("(macroctident @%s", expr->ct_ident_expr.identifier);
DUMPEXPC(expr);

View File

@@ -21,22 +21,17 @@ typedef struct
unsigned index;
} TokenId;
#define INVALID_LOC UINT32_MAX
#define NO_TOKEN_ID ((TokenId) { 0 })
#define NO_TOKEN ((Token) { .type = TOKEN_INVALID_TOKEN })
#define INVALID_TOKEN_ID ((TokenId) { UINT32_MAX })
#define INVALID_RANGE ((SourceSpan){ INVALID_TOKEN_ID, INVALID_TOKEN_ID })
#define MAX_LOCALS 0xFFFF
#define MAX_FLAGS 0xFFFF
#define MAX_SCOPE_DEPTH 0xFF
#define MAX_PATH 1024
#define MAX_DEFERS 0xFFFF
#define MAX_MACRO_NESTING 1024
#define MAX_FUNCTION_SIGNATURE_SIZE 2048
#define MAX_PARAMS 512
#define MAX_ERRORS 0xFFFF
#define MAX_ALIGNMENT (1U << 29U)
#define LEXER_VM_SIZE_MB 2048
typedef struct _Ast Ast;
typedef struct _Decl Decl;
@@ -71,6 +66,11 @@ typedef struct
int len;
} string;
Decl *enum_constant;
struct
{
long double i;
long double r;
} complex;
};
// Valid type kinds:
// bool, ints, floats, string
@@ -198,6 +198,12 @@ typedef struct
size_t len;
} TypeArray;
typedef struct
{
Type *base;
size_t len;
} TypeVector;
typedef struct
{
struct _FunctionSignature *signature;
@@ -224,6 +230,9 @@ struct _Type
TypeFunc func;
// Type*
Type *pointer;
// Type[<123>] or Type<[123]>
TypeVector vector;
Type *complex;
};
};
@@ -266,7 +275,6 @@ typedef struct
typedef struct
{
uint32_t abi_alignment;
uint64_t size;
Decl **members;
} StructDecl;
@@ -289,7 +297,11 @@ typedef struct _VarDecl
void *backend_debug_ref;
void *scope;
};
void *failable_ref;
union
{
void *failable_ref;
struct ABIArgInfo_ *abi_info;
};
} VarDecl;
@@ -332,9 +344,10 @@ typedef struct _FunctionSignature
CallConvention convention : 4;
bool variadic : 1;
bool has_default : 1;
bool return_param : 1;
bool failable : 1;
TypeInfo *rtype;
struct ABIArgInfo_ *ret_abi_info;
struct ABIArgInfo_ *failable_abi_info;
Decl** params;
const char *mangled_signature;
} FunctionSignature;
@@ -434,6 +447,7 @@ typedef struct _Decl
const char *cname;
uint32_t alignment;
const char *section;
size_t offset;
/* bool is_exported : 1;
bool is_used : 1;
bool is_used_public : 1;
@@ -458,6 +472,7 @@ typedef struct _Decl
};
union
{
// Unions, Errtype and Struct use strukt
StructDecl strukt;
EnumDecl enums;
};
@@ -591,14 +606,6 @@ typedef struct
Decl *decl;
} ExprIdentifierRaw;
typedef struct
{
const char *identifier;
bool is_macro;
Decl *decl;
} ExprCtIdentifier;
typedef struct
{
CastKind kind;
@@ -684,6 +691,7 @@ struct _Expr
bool failable : 1;
bool pure : 1;
bool constant : 1;
bool reeval : 1;
SourceSpan span;
Type *type;
union {
@@ -712,6 +720,7 @@ struct _Expr
ExprIdentifier macro_identifier_expr;
ExprIdentifierRaw ct_ident_expr;
ExprIdentifierRaw ct_macro_ident_expr;
ExprIdentifierRaw hash_ident_expr;
TypeInfo *typeid_expr;
ExprInitializer expr_initializer;
Decl *expr_enum;
@@ -1011,7 +1020,6 @@ typedef struct _Module
Ast **files; // Asts
Decl** functions;
STable struct_functions;
STable symbols;
STable public_symbols;
Module **sub_modules;
@@ -1077,6 +1085,7 @@ typedef struct _Context
Decl **types;
Decl **generic_defines;
Decl **functions;
Decl **macros;
Decl **methods;
Decl **vars;
Decl **incr_array;
@@ -1110,8 +1119,6 @@ typedef struct _Context
int in_volatile_section;
struct
{
bool in_macro_call : 1;
bool in_macro : 1;
Decl **macro_locals_start;
int macro_counter;
int macro_nesting;
@@ -1157,9 +1164,10 @@ extern Diagnostics diagnostics;
extern Type *type_bool, *type_void, *type_string, *type_voidptr;
extern Type *type_float, *type_double;
extern Type *type_half, *type_float, *type_double, *type_quad;
extern Type *type_char, *type_short, *type_int, *type_long, *type_isize;
extern Type *type_byte, *type_ushort, *type_uint, *type_ulong, *type_usize;
extern Type *type_u128, *type_i128;
extern Type *type_compint, *type_compfloat;
extern Type *type_c_short, *type_c_int, *type_c_long, *type_c_longlong;
extern Type *type_c_ushort, *type_c_uint, *type_c_ulong, *type_c_ulonglong;
@@ -1170,6 +1178,7 @@ extern const char *attribute_list[NUMBER_OF_ATTRIBUTES];
extern const char *kw_main;
extern const char *kw_sizeof;
extern const char *kw_alignof;
extern const char *kw_align;
extern const char *kw_offsetof;
extern const char *kw_kindof;
extern const char *kw_nameof;
@@ -1230,26 +1239,6 @@ static inline bool builtin_may_negate(Type *canonical)
}
}
static inline bool builtin_may_bit_negate(Type *canonical)
{
assert(canonical->canonical == canonical);
switch (canonical->type_kind)
{
case TYPE_BOOL:
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_IXX:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
return true;
default:
return false;
}
}
bool cast_implicit(Context *context, Expr *expr, Type *to_type);
@@ -1262,12 +1251,6 @@ void llvm_codegen(Context *context);
void llvm_codegen_setup();
bool sema_expr_analyse_assign_right_side(Context *context, Expr *expr, Type *left_type, Expr *right, ExprFailableStatus lhs_is_failable);
bool sema_analyse_expr_of_required_type(Context *context, Type *to, Expr *expr, bool may_be_failable);
bool sema_analyse_expr(Context *context, Type *to, Expr *expr);
bool sema_analyse_decl(Context *context, Decl *decl);
bool expr_is_constant_eval(Expr *expr);
void compiler_add_type(Type *type);
Decl *compiler_find_symbol(const char *name);
Module *compiler_find_or_create_module(Path *module_name);
@@ -1291,27 +1274,12 @@ Decl *decl_new_var(TokenId name, TypeInfo *type, VarDeclKind kind, Visibility vi
#define DECL_NEW_VAR(_type, _kind, _vis) decl_new_var(context->tok.id, _type, _kind, _vis)
void decl_set_external_name(Decl *decl);
const char *decl_var_to_string(VarDeclKind kind);
static inline Decl *decl_raw(Decl *decl)
{
if (decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_ALIAS) return decl;
decl = decl->var.alias;
assert(decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_ALIAS);
return decl;
}
static inline Decl *decl_raw(Decl *decl);
static inline bool decl_ok(Decl *decl) { return !decl || decl->decl_kind != DECL_POISONED; }
static inline bool decl_poison(Decl *decl) { decl->decl_kind = DECL_POISONED; decl->resolve_status = RESOLVE_DONE; return false; }
static inline bool decl_is_struct_type(Decl *decl)
{
DeclKind kind = decl->decl_kind;
return (kind == DECL_UNION) | (kind == DECL_STRUCT) | (kind == DECL_ERR);
}
static inline DeclKind decl_from_token(TokenType type)
{
if (type == TOKEN_STRUCT) return DECL_STRUCT;
if (type == TOKEN_UNION) return DECL_UNION;
UNREACHABLE
}
static inline bool decl_is_struct_type(Decl *decl);
unsigned decl_abi_alignment(Decl *decl);
static inline DeclKind decl_from_token(TokenType type);
#pragma mark --- Diag functions
@@ -1337,6 +1305,7 @@ void expr_const_set_null(ExprConst *expr);
void expr_const_fprint(FILE *__restrict file, ExprConst *expr);
bool expr_const_int_overflowed(const ExprConst *expr);
bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp op);
bool expr_is_constant_eval(Expr *expr);
const char *expr_const_to_error_string(const ExprConst *expr);
void fprint_decl(Context *context, FILE *file, Decl *dec);
@@ -1401,8 +1370,13 @@ bool sema_add_local(Context *context, Decl *decl);
bool sema_unwrap_var(Context *context, Decl *decl);
bool sema_rewrap_var(Context *context, Decl *decl);
bool sema_analyse_expr_of_required_type(Context *context, Type *to, Expr *expr, bool may_be_failable);
bool sema_analyse_expr(Context *context, Type *to, Expr *expr);
bool sema_analyse_decl(Context *context, Decl *decl);
bool sema_analyse_ct_assert_stmt(Context *context, Ast *statement);
bool sema_analyse_statement(Context *context, Ast *statement);
bool sema_expr_analyse_assign_right_side(Context *context, Expr *expr, Type *left_type, Expr *right, ExprFailableStatus lhs_is_failable);
Decl *sema_resolve_symbol_in_current_dynamic_scope(Context *context, const char *symbol);
Decl *sema_resolve_symbol(Context *context, const char *symbol, Path *path, Decl **ambiguous_other_decl, Decl **private_decl);
bool sema_resolve_type_info(Context *context, TypeInfo *type_info);
@@ -1437,7 +1411,7 @@ void stable_clear(STable *table);
const char *symtab_add(const char *symbol, uint32_t len, uint32_t fnv1hash, TokenType *type);
void target_setup();
void target_setup(void);
int target_alloca_addr_space();
void *target_data_layout();
void *target_machine();
@@ -1450,72 +1424,67 @@ bool token_is_any_type(TokenType type);
bool token_is_symbol(TokenType type);
const char *token_type_to_string(TokenType type);
unsigned type_abi_alignment(Type *type);
unsigned type_alloca_alignment(Type *type);
void type_append_signature_name(Type *type, char *dst, size_t *offset);
static inline bool type_convert_will_trunc(Type *destination, Type *source);
Type *type_find_common_ancestor(Type *left, Type *right);
Type *type_find_largest_union_element(Type *type);
Type *type_find_max_type(Type *type, Type *other);
Type *type_find_single_struct_element(Type *type);
const char *type_generate_qname(Type *type);
Type *type_get_array(Type *arr_type, uint64_t len);
Type *type_get_indexed_type(Type *type);
Type *type_get_ptr(Type *ptr_type);
Type *type_get_subarray(Type *arr_type);
Type *type_get_vararray(Type *arr_type);
Type *type_get_meta(Type *meta_type);
Type *type_get_indexed_type(Type *type);
Type *type_get_array(Type *arr_type, uint64_t len);
bool type_is_user_defined(Type *type);
Type *type_signed_int_by_bitsize(unsigned bytesize);
Type *type_unsigned_int_by_bitsize(unsigned bytesize);
Type *type_get_vector(Type *vector_type, unsigned len);
Type *type_int_signed_by_bitsize(unsigned bytesize);
Type *type_int_unsigned_by_bitsize(unsigned bytesize);
bool type_is_abi_aggregate(Type *type);
static inline bool type_is_any_integer(Type *type);
static inline bool type_is_builtin(TypeKind kind);
static inline bool type_is_ct(Type *type);
bool type_is_empty_union_struct(Type *type, bool allow_array);
bool type_is_empty_field(Type *type, bool allow_array);
static inline bool type_is_float(Type *type);
bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements);
bool type_is_int128(Type *type);
static inline bool type_is_integer(Type *type);
static inline bool type_is_integer_unsigned(Type *type);
static inline bool type_is_integer_signed(Type *type);
static inline bool type_is_integer_kind(Type *type);
static inline bool type_is_numeric(Type *type);
static inline bool type_is_pointer(Type *type);
static inline bool type_is_promotable_integer(Type *type);
static inline bool type_is_signed(Type *type);
static inline bool type_is_structlike(Type *type);
static inline bool type_is_promotable_integer(Type *type);
bool type_is_subtype(Type *type, Type *possible_subtype);
Type *type_find_common_ancestor(Type *left, Type *right);
const char *type_to_error_string(Type *type);
size_t type_size(Type *type);
unsigned int type_abi_alignment(Type *type);
const char *type_generate_qname(Type *type);
void type_append_signature_name(Type *type, char *dst, size_t *offset);
Type *type_find_max_type(Type *type, Type *other);
static inline bool type_is_builtin(TypeKind kind) { return kind >= TYPE_VOID && kind <= TYPE_TYPEID; }
static inline bool type_kind_is_signed(TypeKind kind) { return kind >= TYPE_I8 && kind <= TYPE_I64; }
static inline bool type_kind_is_unsigned(TypeKind kind) { return kind >= TYPE_U8 && kind <= TYPE_U64; }
static inline bool type_kind_is_any_integer(TypeKind kind) { return kind >= TYPE_I8 && kind <= TYPE_IXX; }
static inline bool type_is_signed(Type *type) { return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_I64; }
static inline bool type_is_unsigned(Type *type) { return type->type_kind >= TYPE_U8 && type->type_kind <= TYPE_U64; }
static inline bool type_ok(Type *type) { return !type || type->type_kind != TYPE_POISONED; }
static inline bool type_info_ok(TypeInfo *type_info) { return !type_info || type_info->kind != TYPE_INFO_POISON; }
bool type_is_union_struct(Type *type);
bool type_is_user_defined(Type *type);
static inline Type *type_lowering(Type *type);
bool type_may_have_sub_elements(Type *type);
static inline bool type_kind_is_derived(TypeKind kind)
{
switch (kind)
{
case TYPE_ARRAY:
case TYPE_POINTER:
case TYPE_VARARRAY:
case TYPE_SUBARRAY:
return true;
default:
return false;
}
}
static inline bool type_ok(Type *type);
static inline Type *type_reduced_from_expr(Expr *expr);
size_t type_size(Type *type);
const char *type_to_error_string(Type *type);
// Reduce a type to its canonical form, additionally lowering enums to the
// canonical form of their declared underlying (storage) type.
static inline Type *type_reduced(Type *type)
{
Type *canonical = type->canonical;
// An enum behaves as its backing integer type here.
if (canonical->type_kind == TYPE_ENUM) return canonical->decl->enums.type_info->type->canonical;
return canonical;
}
static inline TypeInfo *type_info_new(TypeInfoKind kind, SourceSpan span);
static inline TypeInfo *type_info_new_base(Type *type, SourceSpan span);
static inline bool type_info_ok(TypeInfo *type_info);
static inline bool type_info_poison(TypeInfo *type);
static inline bool type_is_structlike(Type *type)
{
assert(type->canonical == type);
switch (type->type_kind)
{
case TYPE_UNION:
case TYPE_STRUCT:
case TYPE_ERRTYPE:
return true;
default:
return false;
static inline bool type_kind_is_signed(TypeKind kind);
static inline bool type_kind_is_unsigned(TypeKind kind);
static inline bool type_kind_is_any_integer(TypeKind kind);
static inline bool type_kind_is_derived(TypeKind kind);
}
}
// ---- static inline function implementations.
static inline Type *type_reduced_from_expr(Expr *expr)
{
return type_reduced(expr->type);
return type_lowering(expr->type);
}
@@ -1525,19 +1494,26 @@ static inline bool type_is_integer(Type *type)
return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_U64;
}
// True for every integer kind, signed or unsigned, including the
// compile-time integer TYPE_IXX. Requires a canonical type.
static inline bool type_is_any_integer(Type *type)
{
assert(type == type->canonical);
// Relies on the TypeKind enum laying out all integer kinds contiguously
// from TYPE_I8 through TYPE_IXX.
return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_IXX;
}
static inline bool type_is_signed_integer(Type *type)
static inline bool type_is_integer_signed(Type *type)
{
assert(type == type->canonical);
return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_I64;
}
static inline bool type_is_unsigned_integer(Type *type)
static inline bool type_is_integer_kind(Type *type)
{
assert(type == type->canonical);
return type->type_kind >= TYPE_BOOL && type->type_kind <= TYPE_U64;
}
static inline bool type_is_integer_unsigned(Type *type)
{
assert(type == type->canonical);
return type->type_kind >= TYPE_U8 && type->type_kind <= TYPE_U64;
@@ -1552,22 +1528,30 @@ static inline bool type_info_poison(TypeInfo *type)
static inline bool type_is_ct(Type *type)
{
while (1)
RETRY:
switch (type->type_kind)
{
switch (type->type_kind)
{
case TYPE_FXX:
case TYPE_IXX:
return true;
case TYPE_TYPEDEF:
type = type->canonical;
break;
default:
return false;
}
case TYPE_FXX:
case TYPE_IXX:
return true;
case TYPE_TYPEDEF:
type = type->canonical;
goto RETRY;
default:
return false;
}
}
// True for pointer-shaped types. NOTE(review): variable arrays
// (TYPE_VARARRAY) deliberately count as pointers here — confirm callers
// expect that.
static inline bool type_is_pointer(Type *type)
{
return type->type_kind == TYPE_POINTER || type->type_kind == TYPE_VARARRAY;
}
/**
 * Round `offset` up to the next multiple of `alignment`.
 * Valid for any non-zero alignment (not just powers of two).
 */
static inline size_t aligned_offset(size_t offset, size_t alignment)
{
	size_t remainder = offset % alignment;
	return remainder ? offset + (alignment - remainder) : offset;
}
static inline bool type_is_float(Type *type)
{
assert(type == type->canonical);
@@ -1618,11 +1602,6 @@ static inline bool type_is_numeric(Type *type)
return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_FXX;
}
#define TYPE_MODULE_UNRESOLVED(_module, _name) ({ Type *__type = type_new(TYPE_USER_DEFINED); \
__type->name_loc = _name; __type->unresolved.module = _module; __type; })
#define TYPE_UNRESOLVED(_name) ({ TypeInfo *__type = type_new(TYPE_USER_DEFINED); __type->name_loc = _name; __type; })
UnaryOp unaryop_from_token(TokenType type);
TokenType unaryop_to_token(UnaryOp type);
PostUnaryOp post_unaryop_from_token(TokenType type);
@@ -1647,6 +1626,80 @@ static inline void advance_and_verify(Context *context, TokenType token_type)
advance(context);
}
// Range-based classifiers that depend on the TypeKind enum ordering
// (TYPE_VOID..TYPE_TYPEID builtin, TYPE_I8..TYPE_I64 signed ints,
// TYPE_U8..TYPE_U64 unsigned ints).
// NOTE(review): the signed/unsigned ranges end at I64/U64 and therefore
// exclude the newly added TYPE_I128/TYPE_U128 — confirm this is intended.
static inline bool type_is_builtin(TypeKind kind) { return kind >= TYPE_VOID && kind <= TYPE_TYPEID; }
static inline bool type_kind_is_signed(TypeKind kind) { return kind >= TYPE_I8 && kind <= TYPE_I64; }
static inline bool type_kind_is_unsigned(TypeKind kind) { return kind >= TYPE_U8 && kind <= TYPE_U64; }
// Includes signed, unsigned and the compile-time int TYPE_IXX.
static inline bool type_kind_is_any_integer(TypeKind kind) { return kind >= TYPE_I8 && kind <= TYPE_IXX; }
static inline bool type_is_signed(Type *type) { return type->type_kind >= TYPE_I8 && type->type_kind <= TYPE_I64; }
static inline bool type_is_unsigned(Type *type) { return type->type_kind >= TYPE_U8 && type->type_kind <= TYPE_U64; }
// A NULL type is considered ok; only TYPE_POISONED marks failure.
static inline bool type_ok(Type *type) { return !type || type->type_kind != TYPE_POISONED; }
static inline bool type_info_ok(TypeInfo *type_info) { return !type_info || type_info->kind != TYPE_INFO_POISON; }
/**
 * True for type kinds derived from an element type: fixed arrays,
 * pointers, variable arrays and subarrays.
 */
static inline bool type_kind_is_derived(TypeKind kind)
{
	return kind == TYPE_ARRAY || kind == TYPE_POINTER
	       || kind == TYPE_VARARRAY || kind == TYPE_SUBARRAY;
}
// True when the canonical type is an aggregate with members:
// struct, union or error type. The caller must pass a canonical type.
static inline bool type_is_structlike(Type *type)
{
assert(type->canonical == type);
switch (type->type_kind)
{
case TYPE_UNION:
case TYPE_STRUCT:
case TYPE_ERRTYPE:
return true;
default:
return false;
}
}
// Lower a type for backend use: canonicalize, then replace enums with
// their underlying integer type and typeid with usize.
static inline Type *type_lowering(Type *type)
{
Type *canonical = type->canonical;
// Enums lower to the canonical form of their declared storage type.
if (canonical->type_kind == TYPE_ENUM) return canonical->decl->enums.type_info->type->canonical;
// A typeid is represented as a pointer-sized unsigned integer.
if (canonical->type_kind == TYPE_TYPEID) return type_usize->canonical;
return canonical;
}
// Follow a variable-alias declaration to the declaration it aliases.
// Non-alias declarations are returned unchanged. Aliases are assumed to
// be at most one level deep (asserted, not looped).
static inline Decl *decl_raw(Decl *decl)
{
if (decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_ALIAS) return decl;
decl = decl->var.alias;
// An alias must not point at another alias.
assert(decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_ALIAS);
return decl;
}
/**
 * True when the declaration defines a struct-like aggregate:
 * a struct, a union, or an error type.
 */
static inline bool decl_is_struct_type(Decl *decl)
{
	DeclKind kind = decl->decl_kind;
	return kind == DECL_UNION || kind == DECL_STRUCT || kind == DECL_ERR;
}
// Map a struct/union keyword token to the corresponding DeclKind.
// Any other token is a caller error (UNREACHABLE).
static inline DeclKind decl_from_token(TokenType type)
{
if (type == TOKEN_STRUCT) return DECL_STRUCT;
if (type == TOKEN_UNION) return DECL_UNION;
UNREACHABLE
}
// True for integer types narrower than the target's C int, i.e. types
// subject to integer promotion in C-style arithmetic.
static inline bool type_is_promotable_integer(Type *type)
{
// If we support other architectures, update this.
return type_is_integer_kind(type) && type->builtin.bytesize < type_c_int->builtin.bytesize;
}
#define TRY_AST_OR(_ast_stmt, _res) ({ Ast* _ast = (_ast_stmt); if (!ast_ok(_ast)) return _res; _ast; })
#define TRY_EXPR_OR(_expr_stmt, _res) ({ Expr* _expr = (_expr_stmt); if (!expr_ok(_expr)) return _res; _expr; })
#define TRY_TYPE_OR(_type_stmt, _res) ({ TypeInfo* _type = (_type_stmt); if (!type_info_ok(_type)) return _res; _type; })

View File

@@ -94,9 +94,12 @@ void context_register_global_decl(Context *context, Decl *decl)
switch (decl->decl_kind)
{
case DECL_POISONED:
case DECL_MACRO:
case DECL_GENERIC:
break;
case DECL_MACRO:
vec_add(context->macros, decl);
decl_set_external_name(decl);
break;
case DECL_FUNC:
if (decl->func.type_parent)
{

View File

@@ -107,6 +107,7 @@ typedef enum
CAST_BOOLFP,
CAST_FPBOOL,
CAST_INTBOOL,
CAST_CXBOOL,
CAST_FPFP,
CAST_FPSI,
CAST_FPUI,
@@ -187,6 +188,7 @@ typedef enum
EXPR_IDENTIFIER,
EXPR_MACRO_IDENTIFIER,
EXPR_CT_IDENT,
EXPR_HASH_IDENT,
EXPR_CONST_IDENTIFIER,
EXPR_MACRO_CT_IDENTIFIER,
EXPR_CALL,
@@ -475,13 +477,17 @@ typedef enum
TYPE_I16,
TYPE_I32,
TYPE_I64,
TYPE_I128,
TYPE_U8,
TYPE_U16,
TYPE_U32,
TYPE_U64,
TYPE_U128,
TYPE_IXX,
TYPE_F16,
TYPE_F32,
TYPE_F64,
TYPE_F128,
TYPE_FXX,
TYPE_TYPEID,
TYPE_POINTER,
@@ -498,14 +504,17 @@ typedef enum
TYPE_SUBARRAY,
TYPE_TYPEINFO,
TYPE_MEMBER,
TYPE_VECTOR,
TYPE_COMPLEX,
TYPE_LAST = TYPE_MEMBER
} TypeKind;
#define ALL_INTS TYPE_I8: case TYPE_I16: case TYPE_I32: case TYPE_I64: \
case TYPE_U8: case TYPE_U16: case TYPE_U32: case TYPE_U64: case TYPE_IXX
#define ALL_SIGNED_INTS TYPE_I8: case TYPE_I16: case TYPE_I32: case TYPE_I64
#define ALL_UNSIGNED_INTS TYPE_U8: case TYPE_U16: case TYPE_U32: case TYPE_U64
#define ALL_FLOATS TYPE_F32: case TYPE_F64: case TYPE_FXX
#define ALL_INTS TYPE_I8: case TYPE_I16: case TYPE_I32: case TYPE_I64: case TYPE_I128: \
case TYPE_U8: case TYPE_U16: case TYPE_U32: case TYPE_U64: case TYPE_U128: case TYPE_IXX
#define ALL_SIGNED_INTS TYPE_I8: case TYPE_I16: case TYPE_I32: case TYPE_I64: case TYPE_I128
#define ALL_UNSIGNED_INTS TYPE_U8: case TYPE_U16: case TYPE_U32: case TYPE_U64: case TYPE_U128
#define ALL_REAL_FLOATS TYPE_F16: case TYPE_F32: case TYPE_F64: case TYPE_F128
#define ALL_FLOATS ALL_REAL_FLOATS: case TYPE_FXX
#define TYPE_KINDS (TYPE_LAST + 1)
@@ -586,6 +595,11 @@ typedef enum
typedef enum
{
CALL_CONVENTION_NORMAL = 0,
CALL_CONVENTION_VECTOR,
CALL_CONVENTION_SYSCALL,
CALL_CONVENTION_REGCALL,
CALL_CONVENTION_STD,
CALL_CONVENTION_FAST,
} CallConvention;
typedef enum

View File

@@ -5,6 +5,7 @@
#include "llvm_codegen_internal.h"
static int get_inlining_threshold(void);
static void diagnostics_handler(LLVMDiagnosticInfoRef ref, void *context)
{
char *message = LLVMGetDiagInfoDescription(ref);
@@ -33,6 +34,8 @@ static void gencontext_init(GenContext *context, Context *ast_context)
{
memset(context, 0, sizeof(GenContext));
context->context = LLVMContextCreate();
context->bool_type = LLVMInt1TypeInContext(context->context);
context->byte_type = LLVMInt8TypeInContext(context->context);
LLVMContextSetDiagnosticHandler(context->context, &diagnostics_handler, context);
context->ast_context = ast_context;
}
@@ -42,19 +45,19 @@ static void gencontext_destroy(GenContext *context)
LLVMContextDispose(context->context);
}
LLVMValueRef gencontext_emit_memclear_size_align(GenContext *context, LLVMValueRef ref, uint64_t size, unsigned int align, bool bitcast)
LLVMValueRef llvm_emit_memclear_size_align(GenContext *c, LLVMValueRef ref, uint64_t size, unsigned int align, bool bitcast)
{
LLVMValueRef target = bitcast ? LLVMBuildBitCast(context->builder, ref, llvm_type(type_get_ptr(type_byte)), "") : ref;
return LLVMBuildMemSet(context->builder, target, LLVMConstInt(llvm_type(type_byte), 0, false),
LLVMConstInt(llvm_type(type_ulong), size, false), align);
LLVMValueRef target = bitcast ? LLVMBuildBitCast(c->builder, ref, llvm_get_type(c, type_get_ptr(type_byte)), "") : ref;
return LLVMBuildMemSet(c->builder, target, LLVMConstInt(llvm_get_type(c, type_byte), 0, false),
LLVMConstInt(llvm_get_type(c, type_ulong), size, false), align);
}
LLVMValueRef gencontext_emit_memclear(GenContext *context, LLVMValueRef ref, Type *type)
LLVMValueRef llvm_emit_memclear(GenContext *c, LLVMValueRef ref, Type *type)
{
// TODO avoid bitcast on those that do not need them.
return gencontext_emit_memclear_size_align(context, ref, type_size(type),
type_abi_alignment(type), true);
return llvm_emit_memclear_size_align(c, ref, type_size(type),
type_abi_alignment(type), true);
}
@@ -66,15 +69,17 @@ static void gencontext_emit_global_variable_definition(GenContext *context, Decl
if (!decl->type) return;
// TODO fix name
decl->backend_ref = LLVMAddGlobal(context->module, llvm_type(decl->type), decl->name);
decl->backend_ref = LLVMAddGlobal(context->module, llvm_get_type(context, decl->type), decl->name);
if (decl->var.init_expr)
{
LLVMSetInitializer(decl->backend_ref, gencontext_emit_expr(context, decl->var.init_expr));
BEValue value;
llvm_emit_expr(context, &value, decl->var.init_expr);
LLVMSetInitializer(decl->backend_ref, bevalue_store_value(context, &value));
}
else
{
LLVMSetInitializer(decl->backend_ref, LLVMConstNull(llvm_type(decl->type)));
LLVMSetInitializer(decl->backend_ref, LLVMConstNull(llvm_get_type(context, decl->type)));
}
LLVMSetGlobalConstant(decl->backend_ref, decl->var.kind == VARDECL_CONST);
@@ -95,22 +100,9 @@ static void gencontext_emit_global_variable_definition(GenContext *context, Decl
int alignment = 64; // TODO
// Should we set linkage here?
if (context->debug.builder && false)
if (llvm_use_debug(context))
{
decl->var.backend_debug_ref = LLVMDIBuilderCreateGlobalVariableExpression(context->debug.builder,
NULL /*scope*/,
decl->name,
1, /*source_range_len(decl->name_span),*/
"linkagename",
2,
context->debug.file,
12 /* lineno */,
llvm_debug_type(decl->type),
decl->visibility ==
VISIBLE_LOCAL, /* expr */
NULL, /** declaration **/
NULL,
alignment);
llvm_emit_debug_global_var(context, decl);
}
}
static void gencontext_verify_ir(GenContext *context)
@@ -154,15 +146,30 @@ void gencontext_print_llvm_ir(GenContext *context)
}
LLVMValueRef gencontext_emit_alloca(GenContext *context, LLVMTypeRef type, const char *name)
// Emit an alloca in the function's entry block (before alloca_point) so
// every stack slot is hoisted out of loops, then restore the builder to
// where it was. An alignment of 0 means "use the type's ABI alignment".
LLVMValueRef llvm_emit_alloca(GenContext *context, LLVMTypeRef type, unsigned alignment, const char *name)
{
LLVMBasicBlockRef current_block = LLVMGetInsertBlock(context->builder);
// Insert at the dedicated alloca point in the entry block.
LLVMPositionBuilderBefore(context->builder, context->alloca_point);
LLVMValueRef alloca = LLVMBuildAlloca(context->builder, type, name);
// GNU `?:`: fall back to the ABI alignment when none was requested.
LLVMSetAlignment(alloca, alignment ?: llvm_abi_alignment(type));
// Resume emitting where the caller left off.
LLVMPositionBuilderAtEnd(context->builder, current_block);
return alloca;
}
// Convenience wrapper: alloca for a front-end type using its preferred
// stack (alloca) alignment.
LLVMValueRef llvm_emit_alloca_aligned(GenContext *c, Type *type, const char *name)
{
return llvm_emit_alloca(c, llvm_get_type(c, type), type_alloca_alignment(type), name);
}
// Emit the stack slot for a variable declaration. An explicit alignment
// on the decl overrides the type's alloca alignment; unnamed decls get
// the placeholder name "anon".
LLVMValueRef llvm_emit_decl_alloca(GenContext *c, Decl *decl)
{
LLVMTypeRef type = llvm_get_type(c, decl->type);
return llvm_emit_alloca(c,
type,
decl->alignment ?: type_alloca_alignment(decl->type),
decl->name ?: "anon");
}
/**
* Values here taken from LLVM.
* @return return the inlining threshold given the build options.
@@ -196,54 +203,64 @@ static inline unsigned lookup_attribute(const char *name)
}
static bool intrinsics_setup = false;
unsigned ssub_overflow_intrinsic_id;
unsigned usub_overflow_intrinsic_id;
unsigned sadd_overflow_intrinsic_id;
unsigned uadd_overflow_intrinsic_id;
unsigned smul_overflow_intrinsic_id;
unsigned umul_overflow_intrinsic_id;
unsigned trap_intrinsic_id;
unsigned assume_intrinsic_id;
unsigned intrinsic_id_ssub_overflow;
unsigned intrinsic_id_usub_overflow;
unsigned intrinsic_id_sadd_overflow;
unsigned intrinsic_id_uadd_overflow;
unsigned intrinsic_id_smul_overflow;
unsigned intrinsic_id_umul_overflow;
unsigned intrinsic_id_trap;
unsigned intrinsic_id_assume;
unsigned noinline_attribute;
unsigned alwaysinline_attribute;
unsigned inlinehint_attribute;
unsigned noreturn_attribute;
unsigned nounwind_attribute;
unsigned writeonly_attribute;
unsigned readonly_attribute;
unsigned optnone_attribute;
unsigned noalias_attribute;
unsigned sret_attribute;
unsigned attribute_noinline;
unsigned attribute_alwaysinline;
unsigned attribute_inlinehint;
unsigned attribute_noreturn;
unsigned attribute_nounwind;
unsigned attribute_writeonly;
unsigned attribute_readonly;
unsigned attribute_optnone;
unsigned attribute_align;
unsigned attribute_noalias;
unsigned attribute_sret;
unsigned attribute_zext;
unsigned attribute_sext;
unsigned attribute_byval;
unsigned attribute_inreg;
void llvm_codegen_setup()
{
assert(intrinsics_setup == false);
ssub_overflow_intrinsic_id = lookup_intrinsic("llvm.ssub.with.overflow");
usub_overflow_intrinsic_id = lookup_intrinsic("llvm.usub.with.overflow");
sadd_overflow_intrinsic_id = lookup_intrinsic("llvm.sadd.with.overflow");
uadd_overflow_intrinsic_id = lookup_intrinsic("llvm.uadd.with.overflow");
smul_overflow_intrinsic_id = lookup_intrinsic("llvm.smul.with.overflow");
umul_overflow_intrinsic_id = lookup_intrinsic("llvm.umul.with.overflow");
trap_intrinsic_id = lookup_intrinsic("llvm.trap");
assume_intrinsic_id = lookup_intrinsic("llvm.assume");
intrinsic_id_ssub_overflow = lookup_intrinsic("llvm.ssub.with.overflow");
intrinsic_id_usub_overflow = lookup_intrinsic("llvm.usub.with.overflow");
intrinsic_id_sadd_overflow = lookup_intrinsic("llvm.sadd.with.overflow");
intrinsic_id_uadd_overflow = lookup_intrinsic("llvm.uadd.with.overflow");
intrinsic_id_smul_overflow = lookup_intrinsic("llvm.smul.with.overflow");
intrinsic_id_umul_overflow = lookup_intrinsic("llvm.umul.with.overflow");
intrinsic_id_trap = lookup_intrinsic("llvm.trap");
intrinsic_id_assume = lookup_intrinsic("llvm.assume");
noinline_attribute = lookup_attribute("noinline");
alwaysinline_attribute = lookup_attribute("alwaysinline");
inlinehint_attribute = lookup_attribute("inlinehint");
noreturn_attribute = lookup_attribute("noreturn");
nounwind_attribute = lookup_attribute("nounwind");
writeonly_attribute = lookup_attribute("writeonly");
readonly_attribute = lookup_attribute("readonly");
optnone_attribute = lookup_attribute("optnone");
sret_attribute = lookup_attribute("sret");
noalias_attribute = lookup_attribute("noalias");
attribute_noinline = lookup_attribute("noinline");
attribute_alwaysinline = lookup_attribute("alwaysinline");
attribute_inlinehint = lookup_attribute("inlinehint");
attribute_noreturn = lookup_attribute("noreturn");
attribute_nounwind = lookup_attribute("nounwind");
attribute_writeonly = lookup_attribute("writeonly");
attribute_readonly = lookup_attribute("readonly");
attribute_optnone = lookup_attribute("optnone");
attribute_sret = lookup_attribute("sret");
attribute_noalias = lookup_attribute("noalias");
attribute_zext = lookup_attribute("zeroext");
attribute_sext = lookup_attribute("signext");
attribute_align = lookup_attribute("align");
attribute_byval = lookup_attribute("byval");
attribute_inreg = lookup_attribute("inreg");
intrinsics_setup = true;
}
void gencontext_emit_introspection_type(GenContext *context, Decl *decl)
{
llvm_type(decl->type);
llvm_get_type(context, decl->type);
if (decl_is_struct_type(decl))
{
Decl **decls = decl->strukt.members;
@@ -256,10 +273,10 @@ void gencontext_emit_introspection_type(GenContext *context, Decl *decl)
}
}
}
LLVMValueRef global_name = LLVMAddGlobal(context->module, llvm_type(type_byte), decl->name ? decl->name : "anon");
LLVMValueRef global_name = LLVMAddGlobal(context->module, llvm_get_type(context, type_byte), decl->name ? decl->name : "anon");
LLVMSetGlobalConstant(global_name, 1);
LLVMSetInitializer(global_name, LLVMConstInt(llvm_type(type_byte), 1, false));
decl->type->backend_typeid = LLVMBuildPtrToInt(context->builder, global_name, llvm_type(type_typeid), "");
LLVMSetInitializer(global_name, LLVMConstInt(llvm_get_type(context, type_byte), 1, false));
decl->type->backend_typeid = LLVMBuildPtrToInt(context->builder, global_name, llvm_get_type(context, type_typeid), "");
switch (decl->visibility)
{
@@ -288,6 +305,119 @@ static inline uint32_t upper_power_of_two(uint32_t v)
return v;
}
// Wrap an i1 LLVM value as a boolean BEValue.
void llvm_value_set_bool(BEValue *value, LLVMValueRef llvm_value)
{
	value->kind = BE_BOOLEAN;
	value->type = type_bool;
	value->alignment = type_abi_alignment(type_bool);
	value->value = llvm_value;
}
// Wrap a plain rvalue of the given type as a BEValue.
void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type)
{
	value->kind = BE_VALUE;
	value->type = type;
	value->alignment = type_abi_alignment(type);
	value->value = llvm_value;
}
// True if the wrapped LLVM value is a compile-time constant.
bool llvm_value_is_const(BEValue *value)
{
	return LLVMIsConstant(value->value) != 0;
}
// Wrap a pointer as an address BEValue with an explicit alignment.
void llvm_value_set_address_align(BEValue *value, LLVMValueRef llvm_value, Type *type, unsigned alignment)
{
	value->kind = BE_ADDRESS;
	value->type = type;
	value->alignment = alignment;
	value->value = llvm_value;
}
// Wrap a pointer as an address BEValue, using the type's ABI alignment.
void llvm_value_set_address(BEValue *value, LLVMValueRef llvm_value, Type *type)
{
	unsigned abi_align = type_abi_alignment(type);
	llvm_value_set_address_align(value, llvm_value, type, abi_align);
}
// If the value is a failable address, emit the runtime error check:
// load the error slot, and on error branch to the active catch block
// (first storing the error into c->error_var when one is set).
// Afterwards the value is downgraded to a plain BE_ADDRESS.
static inline void be_value_fold_failable(GenContext *c, BEValue *value)
{
	if (value->kind == BE_ADDRESS_FAILABLE)
	{
		LLVMBasicBlockRef after_block = llvm_basic_block_new(c, "after_check");
		// TODO optimize load.
		LLVMValueRef error_value = gencontext_emit_load(c, type_error, value->failable);
		BEValue comp;
		// comp == true means "no error occurred".
		llvm_value_set_bool(&comp, llvm_emit_is_no_error(c, error_value));
		if (c->error_var)
		{
			// Capture the error value before jumping to the catch block.
			LLVMBasicBlockRef error_block = llvm_basic_block_new(c, "error");
			llvm_emit_cond_br(c, &comp, after_block, error_block);
			llvm_emit_block(c, error_block);
			llvm_store_aligned(c, c->error_var, error_value, type_abi_alignment(type_usize));
			llvm_emit_br(c, c->catch_block);
		}
		else
		{
			// No error var: branch straight to the catch block on error.
			llvm_emit_cond_br(c, &comp, after_block, c->catch_block);
		}
		llvm_emit_block(c, after_block);
		value->kind = BE_ADDRESS;
	}
}
// Produce a plain LLVM rvalue from a BEValue, folding failability and
// loading through addresses as needed. Booleans are widened from i1 to a byte.
LLVMValueRef bevalue_store_value(GenContext *c, BEValue *value)
{
	be_value_fold_failable(c, value);
	switch (value->kind)
	{
		case BE_VALUE:
			return value->value;
		case BE_ADDRESS_FAILABLE:
			// Folded above, cannot occur here.
			UNREACHABLE
		case BE_ADDRESS:
			// NOTE: `?:` is a GNU extension — alignment falls back to the ABI alignment.
			return llvm_emit_load_aligned(c,
			                              llvm_get_type(c, value->type),
			                              value->value,
			                              value->alignment ?: type_abi_alignment(value->type),
			                              "");
		case BE_BOOLEAN:
			// i1 -> i8 so the value has a storable in-memory representation.
			return LLVMBuildZExt(c->builder, value->value, c->byte_type, "");
	}
	UNREACHABLE
}
// Create a new basic block in the current LLVM context.
// The block is not yet appended to any function.
LLVMBasicBlockRef llvm_basic_block_new(GenContext *c, const char *name)
{
	LLVMBasicBlockRef block = LLVMCreateBasicBlockInContext(c->context, name);
	return block;
}
// Ensure the value is addressable: if it is an rvalue, spill it to a
// temporary alloca and rewrap it as an address.
void llvm_value_addr(GenContext *c, BEValue *value)
{
	be_value_fold_failable(c, value);
	if (value->kind == BE_ADDRESS) return;
	LLVMValueRef temp = llvm_emit_alloca_aligned(c, value->type, "tempaddr");
	llvm_store_self_aligned(c, temp, value->value, value->type);
	llvm_value_set_address(value, temp, value->type);
}
// Convert an address (possibly failable) BEValue into an rvalue by
// loading it. Bool loads are truncated back from byte storage to i1.
void llvm_value_rvalue(GenContext *c, BEValue *value)
{
	if (value->kind != BE_ADDRESS && value->kind != BE_ADDRESS_FAILABLE) return;
	be_value_fold_failable(c, value);
	// NOTE: `?:` is a GNU extension — alignment falls back to the ABI alignment.
	value->value = llvm_emit_load_aligned(c,
	                                      llvm_get_type(c, value->type),
	                                      value->value,
	                                      value->alignment ?: type_abi_alignment(value->type),
	                                      "");
	if (value->type->type_kind == TYPE_BOOL)
	{
		// Bools are stored as bytes; narrow to i1 for SSA use.
		value->value = LLVMBuildTrunc(c->builder, value->value, c->bool_type, "");
		value->kind = BE_BOOLEAN;
		return;
	}
	value->kind = BE_VALUE;
}
static void gencontext_emit_decl(GenContext *context, Decl *decl)
{
@@ -328,15 +458,15 @@ void llvm_codegen(Context *context)
// EmitDeferred()
VECEACH(context->external_symbol_list, i)
{
gencontext_emit_extern_decl(&gen_context, context->external_symbol_list[i]);
llvm_emit_extern_decl(&gen_context, context->external_symbol_list[i]);
}
VECEACH(context->methods, i)
{
gencontext_emit_function_decl(&gen_context, context->methods[i]);
llvm_emit_function_decl(&gen_context, context->methods[i]);
}
VECEACH(context->functions, i)
{
gencontext_emit_function_decl(&gen_context, context->functions[i]);
llvm_emit_function_decl(&gen_context, context->functions[i]);
}
VECEACH(context->types, i)
{
@@ -349,15 +479,15 @@ void llvm_codegen(Context *context)
VECEACH(context->functions, i)
{
Decl *decl = context->functions[i];
if (decl->func.body) gencontext_emit_function_body(&gen_context, decl);
if (decl->func.body) llvm_emit_function_body(&gen_context, decl);
}
VECEACH(context->methods, i)
{
Decl *decl = context->methods[i];
if (decl->func.body) gencontext_emit_function_body(&gen_context, decl);
if (decl->func.body) llvm_emit_function_body(&gen_context, decl);
}
if (gen_context.debug.builder) LLVMDIBuilderFinalize(gen_context.debug.builder);
if (llvm_use_debug(&gen_context)) LLVMDIBuilderFinalize(gen_context.debug.builder);
gencontext_print_llvm_ir(&gen_context);
// Starting from here we could potentially thread this:
@@ -365,14 +495,12 @@ void llvm_codegen(Context *context)
LLVMPassManagerBuilderSetOptLevel(pass_manager_builder, build_options.optimization_level);
LLVMPassManagerBuilderSetSizeLevel(pass_manager_builder, build_options.size_optimization_level);
LLVMPassManagerBuilderSetDisableUnrollLoops(pass_manager_builder, build_options.optimization_level == OPTIMIZATION_NONE);
LLVMPassManagerBuilderUseInlinerWithThreshold(pass_manager_builder, 0); //get_inlining_threshold());
LLVMPassManagerBuilderUseInlinerWithThreshold(pass_manager_builder, get_inlining_threshold());
LLVMPassManagerRef pass_manager = LLVMCreatePassManager();
LLVMPassManagerRef function_pass_manager = LLVMCreateFunctionPassManagerForModule(gen_context.module);
LLVMAddAnalysisPasses(target_machine(), function_pass_manager);
LLVMAddAnalysisPasses(target_machine(), pass_manager);
LLVMPassManagerBuilderPopulateModulePassManager(pass_manager_builder, pass_manager);
// We insert a memcpy pass here, or it will be used too late to fix our aggregate copies.
LLVMAddMemCpyOptPass(function_pass_manager);
LLVMPassManagerBuilderPopulateFunctionPassManager(pass_manager_builder, function_pass_manager);
// IMPROVE
@@ -407,8 +535,123 @@ void llvm_codegen(Context *context)
gencontext_destroy(&gen_context);
}
void gencontext_add_attribute(GenContext *context, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index)
void llvm_attribute_add_int(GenContext *context, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, uint64_t val, int index)
{
LLVMAttributeRef llvm_attr = LLVMCreateEnumAttribute(context->context, attribute_id, 0);
LLVMAttributeRef llvm_attr = LLVMCreateEnumAttribute(context->context, attribute_id, val);
LLVMAddAttributeAtIndex(value_to_add_attribute_to, index, llvm_attr);
}
}
// Add a valueless enum attribute at the given parameter/return index.
void llvm_attribute_add(GenContext *context, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index)
{
	// A value of 0 means "no payload" for enum attributes.
	llvm_attribute_add_int(context, value_to_add_attribute_to, attribute_id, 0, index);
}
// Add the same valueless enum attribute to every index in [index_start, index_end].
void llvm_attribute_add_range(GenContext *context, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index_start, int index_end)
{
	int index = index_start;
	while (index <= index_end)
	{
		llvm_attribute_add_int(context, value_to_add_attribute_to, attribute_id, 0, index);
		index++;
	}
}
// Add a string ("target-features"-style) attribute at the given index.
void llvm_attribute_add_string(GenContext *context, LLVMValueRef value_to_add_attribute_to, const char *attribute, const char *value, int index)
{
	size_t attribute_len = strlen(attribute);
	size_t value_len = strlen(value);
	LLVMAttributeRef string_attr = LLVMCreateStringAttribute(context->context, attribute, attribute_len, value, value_len);
	LLVMAddAttributeAtIndex(value_to_add_attribute_to, index, string_attr);
}
// ABI size in bytes of an LLVM type on the current target.
unsigned llvm_abi_size(LLVMTypeRef type)
{
	LLVMTargetDataRef layout = target_data_layout();
	return LLVMABISizeOfType(layout, type);
}
// ABI alignment in bytes of an LLVM type on the current target.
unsigned llvm_abi_alignment(LLVMTypeRef type)
{
	LLVMTargetDataRef layout = target_data_layout();
	return LLVMABIAlignmentOfType(layout, type);
}
// Store a BEValue into 'destination' with the given alignment
// (0 => use the type's ABI alignment). Scalars are stored directly;
// aggregates held by address are copied with a memcpy.
void llvm_store_bevalue_aligned(GenContext *c, LLVMValueRef destination, BEValue *value, unsigned alignment)
{
	// If we have an address but not an aggregate, do a load.
	be_value_fold_failable(c, value);
	if (value->kind == BE_ADDRESS && !type_is_abi_aggregate(value->type))
	{
		value->value = llvm_emit_load_aligned(c, llvm_get_type(c, value->type), value->value, value->alignment, "");
		value->kind = BE_VALUE;
	}
	switch (value->kind)
	{
		case BE_BOOLEAN:
			// Widen i1 to the byte storage representation before storing.
			value->value = LLVMBuildZExt(c->builder, value->value, c->byte_type, "");
			FALLTHROUGH;
		case BE_VALUE:
			llvm_store_aligned(c, destination, value->value, alignment ?: type_abi_alignment(value->type));
			return;
		case BE_ADDRESS_FAILABLE:
			// Folded above, cannot occur here.
			UNREACHABLE
		case BE_ADDRESS:
		{
			// Here we do an optimized(?) memcopy.
			size_t size = type_size(value->type);
			LLVMValueRef copy_size = llvm_const_int(c, size <= UINT32_MAX ? type_uint : type_usize, size);
			// Both pointers are cast to i8* as required by llvm.memcpy.
			destination = LLVMBuildBitCast(c->builder, destination, llvm_get_ptr_type(c, type_byte), "");
			LLVMValueRef source = LLVMBuildBitCast(c->builder, value->value, llvm_get_ptr_type(c, type_byte), "");
			LLVMBuildMemCpy(c->builder, destination, alignment ?: type_abi_alignment(value->type),
			                source, value->alignment ?: type_abi_alignment(value->type), copy_size);
			return;
		}
	}
	UNREACHABLE
}
// Store a BEValue using the alignment recorded on the destination itself.
void llvm_store_bevalue_dest_aligned(GenContext *c, LLVMValueRef destination, BEValue *value)
{
	unsigned dest_alignment = LLVMGetAlignment(destination);
	llvm_store_bevalue_aligned(c, destination, value, dest_alignment);
}
// Store 'value' into 'destination', which must hold an address.
void llvm_store_bevalue(GenContext *c, BEValue *destination, BEValue *value)
{
	assert(llvm_value_is_addr(destination));
	LLVMValueRef dest_pointer = destination->value;
	llvm_store_bevalue_aligned(c, dest_pointer, value, destination->alignment);
}
// Store a raw LLVM value into an address-kind destination BEValue.
void llvm_store_bevalue_raw(GenContext *c, BEValue *destination, LLVMValueRef raw_value)
{
	assert(llvm_value_is_addr(destination));
	LLVMValueRef dest_pointer = destination->value;
	llvm_store_aligned(c, dest_pointer, raw_value, destination->alignment);
}
// Store 'value' through 'pointer' using the type's own ABI alignment.
void llvm_store_self_aligned(GenContext *context, LLVMValueRef pointer, LLVMValueRef value, Type *type)
{
	unsigned abi_align = type_abi_alignment(type);
	llvm_store_aligned(context, pointer, value, abi_align);
}
// Emit a store instruction and tag it with an explicit alignment.
void llvm_store_aligned(GenContext *context, LLVMValueRef pointer, LLVMValueRef value, unsigned alignment)
{
	LLVMValueRef store_instr = LLVMBuildStore(context->builder, value, pointer);
	LLVMSetAlignment(store_instr, alignment);
}
// Store a value into a declaration's backing storage at its recorded alignment.
void llvm_store_aligned_decl(GenContext *context, Decl *decl, LLVMValueRef value)
{
	LLVMValueRef backing_ref = decl->backend_ref;
	llvm_store_aligned(context, backing_ref, value, decl->alignment);
}
// memcpy 'source' into a declaration's backing storage.
// A source_alignment of 0 means "use the type's ABI alignment".
void llvm_emit_memcpy_to_decl(GenContext *c, Decl *decl, LLVMValueRef source, unsigned source_alignment)
{
	if (source_alignment == 0) source_alignment = type_abi_alignment(decl->type);
	// NOTE: `?:` is a GNU extension — decl alignment falls back to the ABI alignment.
	LLVMBuildMemCpy(c->builder, decl->backend_ref, decl->alignment ?: type_abi_alignment(decl->type),
	                source, source_alignment, llvm_const_int(c, type_usize, type_size(decl->type)));
}
// Emit a typed load with an explicit alignment.
// Alignment 0 falls back to the type's ABI alignment.
LLVMValueRef llvm_emit_load_aligned(GenContext *context, LLVMTypeRef type, LLVMValueRef pointer, unsigned alignment, const char *name)
{
	if (!alignment) alignment = llvm_abi_alignment(type);
	LLVMValueRef loaded = LLVMBuildLoad2(context->builder, type, pointer, name);
	LLVMSetAlignment(loaded, alignment);
	return loaded;
}
// Store size in bytes of an LLVM type on the current target.
unsigned llvm_store_size(LLVMTypeRef type)
{
	LLVMTargetDataRef layout = target_data_layout();
	return LLVMStoreSizeOfType(layout, type);
}

View File

@@ -0,0 +1,273 @@
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by the GNU LGPLv3.0 license
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
// Allocate a zero-initialized ABIArgInfo of the given kind.
ABIArgInfo *abi_arg_new(ABIKind kind)
{
	ABIArgInfo *arg_info = CALLOCS(ABIArgInfo);
	arg_info->kind = kind;
	return arg_info;
}
// Wrap a plain language type as an AbiType.
AbiType *abi_type_new_plain(Type *type)
{
	AbiType *result = CALLOCS(AbiType);
	result->type = type;
	result->kind = ABI_TYPE_PLAIN;
	return result;
}
AbiType *abi_type_new_int_bits(unsigned bits)
{
AbiType *abi_type = CALLOCS(AbiType);
abi_type->kind = ABI_TYPE_INT_BITS;
abi_type->int_bits = bits;
return abi_type;
}
// True when the ABI type is an integer (raw bits or integer language type).
bool abi_type_is_integer(AbiType *type)
{
	if (type->kind == ABI_TYPE_INT_BITS) return true;
	return type_is_integer(type->type);
}
// True when the ABI type wraps a floating-point language type.
bool abi_type_is_float(AbiType *type)
{
	if (type->kind == ABI_TYPE_INT_BITS) return false;
	return type_is_float(type->type);
}
// Size in bytes of an ABI type.
size_t abi_type_size(AbiType *type)
{
	if (type->kind == ABI_TYPE_PLAIN) return type_size(type->type);
	if (type->kind == ABI_TYPE_INT_BITS) return type->int_bits / 8;
	UNREACHABLE;
}
// ABI alignment in bytes of an ABI type. Raw-bit integers align like the
// smallest unsigned integer type whose width is a power of two >= int_bits.
size_t abi_type_abi_alignment(AbiType *type)
{
	switch (type->kind)
	{
		case ABI_TYPE_INT_BITS:
			return type_abi_alignment(type_int_unsigned_by_bitsize(next_highest_power_of_2(type->int_bits)));
		case ABI_TYPE_PLAIN:
			return type_abi_alignment(type->type);
	}
	UNREACHABLE;
}
// True when the argument is passed indirectly (by pointer).
bool abi_arg_is_indirect(ABIArgInfo *info)
{
	switch (info->kind)
	{
		case ABI_ARG_INDIRECT:
			return true;
		case ABI_ARG_IGNORE:
		case ABI_ARG_DIRECT_COERCE:
		case ABI_ARG_EXPAND:
		case ABI_ARG_DIRECT_PAIR:
			return false;
	}
	UNREACHABLE
}
// Indirect 'byval' argument that must be realigned to the given alignment.
ABIArgInfo *abi_arg_new_indirect_realigned(unsigned alignment)
{
	assert(alignment > 0);
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_INDIRECT);
	arg->indirect.by_val = true;
	arg->indirect.realignment = alignment;
	return arg;
}
// Indirect argument passed with the 'byval' attribute (callee gets a copy).
ABIArgInfo *abi_arg_new_indirect_by_val(void)
{
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_INDIRECT);
	arg->indirect.by_val = true;
	return arg;
}
ABIArgInfo *abi_arg_new_indirect_not_by_val(void)
{
ABIArgInfo *info = abi_arg_new(ABI_ARG_INDIRECT);
info->indirect.by_val = true;
return info;
}
// Number of scalar "slots" a type occupies when expanded for ABI_ARG_EXPAND:
// scalars count 1, pairs (complex/subarray/string) count 2, arrays multiply
// by their length, structs sum their members, unions use the largest member.
size_t abi_arg_expanded_size(ABIArgInfo *type_info, Type *type)
{
	switch (type->type_kind)
	{
		case TYPE_TYPEDEF:
			return abi_arg_expanded_size(type_info, type->canonical);
		case TYPE_ARRAY:
			return abi_arg_expanded_size(type_info, type->array.base) * type->array.len;
		case TYPE_STRUCT:
		{
			Decl **members = type->decl->strukt.members;
			size_t result = 0;
			VECEACH(members, i)
			{
				// Fix: this previously accumulated into 'members' (the
				// vector pointer) instead of 'result', so every struct
				// reported an expanded size of 0.
				result += abi_arg_expanded_size(type_info, members[i]->type);
			}
			return result;
		}
		case TYPE_UNION:
		{
			Type *max_union = type_find_largest_union_element(type);
			if (!max_union) return 0;
			return abi_arg_expanded_size(type_info, max_union);
		}
		case TYPE_COMPLEX:
		case TYPE_SUBARRAY:
		case TYPE_STRING:
			// Complex is { real, real }, Sub array { pointer, len } = String?
			return 2;
		case TYPE_ERR_UNION:
		case TYPE_VOID:
		case TYPE_BOOL:
		case ALL_FLOATS:
		case ALL_INTS:
		case TYPE_TYPEID:
		case TYPE_POINTER:
		case TYPE_ENUM:
		case TYPE_ERRTYPE:
		case TYPE_VARARRAY:
		case TYPE_VECTOR:
			return 1;
		case TYPE_POISONED:
		case TYPE_FUNC:
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
			// Not value types, should never be expanded.
			UNREACHABLE
	}
	UNREACHABLE
}
// Direct integer argument with sign/zero extension per its signedness.
ABIArgInfo *abi_arg_new_direct_int_ext(Type *int_to_extend)
{
	ABIArgInfo *arg_info = abi_arg_new(ABI_ARG_DIRECT_COERCE);
	bool is_signed = type_is_signed(int_to_extend);
	arg_info->attributes.signext = is_signed;
	arg_info->attributes.zeroext = !is_signed;
	return arg_info;
}
// Direct argument split into a low/high register pair.
ABIArgInfo *abi_arg_new_direct_pair(AbiType *low_type, AbiType *high_type)
{
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_DIRECT_PAIR);
	arg->direct_pair.lo = low_type;
	arg->direct_pair.hi = high_type;
	return arg;
}
// Direct pass-through argument with no coercion type attached.
ABIArgInfo *abi_arg_new_direct(void)
{
	return abi_arg_new(ABI_ARG_DIRECT_COERCE);
}
// Direct argument coerced to the given ABI target type.
ABIArgInfo *abi_arg_new_direct_coerce(AbiType *target_type)
{
	assert(target_type);
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_DIRECT_COERCE);
	arg->direct_coerce.elements = 0;
	arg->direct_coerce.type = target_type;
	return arg;
}
// Expanded argument with a padding type inserted before it.
ABIArgInfo *abi_arg_new_expand_padded(Type *padding)
{
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_EXPAND);
	arg->expand.padding_type = padding;
	return arg;
}
// Default (target-independent) classification of a return type:
// void is ignored, aggregates are returned via sret, small integers are
// promoted with sign/zero extension, everything else goes direct.
ABIArgInfo *classify_return_type_default(Type *type)
{
	if (type == type_void)
	{
		return abi_arg_new(ABI_ARG_IGNORE);
	}
	// Struct-likes are returned by sret
	if (type_is_abi_aggregate(type))
	{
		return abi_arg_new(ABI_ARG_INDIRECT);
	}
	// Otherwise do we have a type that needs promotion?
	if (type_is_promotable_integer(type_lowering(type)))
	{
		// Consistency fix: reuse the shared helper instead of duplicating
		// the signext/zeroext selection inline (same behavior).
		return abi_arg_new_direct_int_ext(type);
	}
	// No, then do a direct pass.
	return abi_arg_new_direct();
}
// Dispatch C ABI lowering of a function signature to the backend for the
// current build target's ABI. Fills in the abi_info of return and params.
void c_abi_func_create(GenContext *context, FunctionSignature *signature)
{
	switch (build_target.abi)
	{
		case ABI_X64:
			c_abi_func_create_x64(context, signature);
			break;
		case ABI_X86:
			c_abi_func_create_x86(context, signature);
			break;
		case ABI_WIN64:
			c_abi_func_create_win64(context, signature);
			break;
		case ABI_AARCH64:
			c_abi_func_create_aarch64(context, signature);
			break;
		default:
			// Remaining ABIs are not implemented yet.
			FATAL_ERROR("Unsupported ABI");
	}
}
// Default (target-independent) classification of a single argument type.
ABIArgInfo *c_abi_classify_argument_type_default(Type *type)
{
	type = type_lowering(type);
	// Aggregates are passed indirectly (by pointer).
	if (type_is_abi_aggregate(type))
	{
		return abi_arg_new(ABI_ARG_INDIRECT);
	}
	// i128/u128 without native target support: pass indirectly by value.
	if (type_is_int128(type) && !build_target.int_128)
	{
		return abi_arg_new_indirect_by_val();
	}
	// Otherwise do we have a type that needs promotion?
	if (type_is_promotable_integer(type))
	{
		return abi_arg_new_direct_int_ext(type);
	}
	// No, then do a direct pass.
	return abi_arg_new_direct();
}

View File

@@ -0,0 +1,160 @@
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
// Check whether a vector type is illegal for the AArch64 ABI and must be
// coerced. Currently always "no": SVE vector checking is not implemented.
// Callers only test the result for truth, so NULL means "legal".
// TODO: look up SVE vectors.
ABIArgInfo *aarch64_illegal_vector(Type *type)
{
	(void)type;
	// Fix: previously 'return false;' from a pointer-returning function —
	// NULL is the correct null-pointer constant here.
	return NULL;
}
// Coerce a vector that is illegal for AArch64 into a legal ABI form.
// Unreachable today: aarch64_illegal_vector() never reports an illegal vector.
ABIArgInfo *aarch64_coerce_illegal_vector(Type *type)
{
	TODO
}
// Classify a single argument for the AArch64 C ABI (AAPCS / Darwin PCS).
ABIArgInfo *aarch64_classify_argument_type(GenContext *context, Type *type)
{
	type = type_lowering(type);
	if (type->type_kind == TYPE_VOID) return abi_arg_new(ABI_ARG_IGNORE);
	if (type->type_kind == TYPE_VECTOR && aarch64_illegal_vector(type))
	{
		return aarch64_coerce_illegal_vector(type);
	}
	size_t size = type_size(type);
	if (!type_is_abi_aggregate(type))
	{
		// Over 128 bits should be indirect, but
		// we don't have that (yet?)
		// Only Darwin's PCS promotes small integer arguments.
		if (type_is_promotable_integer(type) && build_target.aarch64.is_darwin_pcs)
		{
			return abi_arg_new_direct_int_ext(type);
		}
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	// Is empty
	if (!size) return abi_arg_new(ABI_ARG_IGNORE);
	// Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
	Type *base = NULL;
	unsigned members = 0;
	if (type_is_homogenous_aggregate(type, &base, &members))
	{
		// Pass as 'members' elements of the HFA base float type.
		ABIArgInfo *info = abi_arg_new_direct_coerce(abi_type_new_plain(base));
		info->direct_coerce.elements = members;
		return info;
	}
	// Aggregates <= in registers
	if (size <= 16)
	{
		// For RenderScript <= 16 needs to be coerced.
		unsigned alignment = type_abi_alignment(type);
		if (build_target.aarch64.is_aapcs)
		{
			// AAPCS: chunk by 8 or 16 bytes depending on alignment.
			alignment = alignment < 16 ? 8 : 16;
		}
		else
		{
			// Otherwise chunk by at least pointer alignment.
			if (alignment < type_abi_alignment(type_voidptr))
			{
				alignment = type_abi_alignment(type_voidptr);
			}
		}
		size = aligned_offset(size, alignment);
		// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
		// For aggregates with 16-byte alignment, we use i128.
		ABIArgInfo *info = abi_arg_new_direct_coerce(abi_type_new_int_bits(alignment * 8));
		info->direct_coerce.elements = size / alignment;
		return info;
	}
	// Large aggregates: indirect without byval.
	return abi_arg_new_indirect_not_by_val();
}
// Classify a return type for the AArch64 C ABI.
// 'variadic' matters for watchOS (AARCH64_32), where HFAs of variadic
// functions do not get the homogeneous-aggregate treatment.
ABIArgInfo *aarch64_classify_return_type(GenContext *context, Type *type, bool variadic)
{
	type = type_lowering(type);
	if (type->type_kind == TYPE_VOID) return abi_arg_new(ABI_ARG_IGNORE);
	if (type->type_kind == TYPE_VECTOR && aarch64_illegal_vector(type))
	{
		return aarch64_coerce_illegal_vector(type);
	}
	size_t size = type_size(type);
	// Large vectors by mem.
	if (type->type_kind == TYPE_VECTOR && size > 16)
	{
		return abi_arg_new_direct_coerce(abi_type_new_plain(type));
	}
	if (!type_is_abi_aggregate(type))
	{
		// Only Darwin's PCS promotes small integer returns.
		if (type_is_promotable_integer(type) && build_target.aarch64.is_darwin_pcs)
		{
			return abi_arg_new_direct_int_ext(type);
		}
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	// Abi aggregate:
	// Is empty
	if (!size) return abi_arg_new(ABI_ARG_IGNORE);
	Type *base = NULL;
	unsigned members = 0;
	if (type_is_homogenous_aggregate(type, &base, &members) &&
	    !(build_target.arch == ARCH_TYPE_AARCH64_32 && variadic))
	{
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	// Aggregates <= in registers
	if (size <= 16)
	{
		// For RenderScript <= 16 needs to be coerced.
		unsigned alignment = type_abi_alignment(type);
		// Align to multiple of 8.
		unsigned aligned_size = aligned_offset(size, 8);
		if (alignment < 16 && size == 16)
		{
			// 16 bytes but under-aligned: return as [2 x i64].
			return abi_arg_new_direct_coerce(abi_type_new_plain(type_get_array(type_ulong, size / 8)));
		}
		return abi_arg_new_direct_coerce(abi_type_new_int_bits(aligned_size * 8));
	}
	// Large aggregates: sret via byval pointer.
	return abi_arg_new_indirect_by_val();
}
// Lower a function signature for the AArch64 C ABI.
// Failable functions return the error value; the real return value (if any)
// is then passed as an extra pointer argument.
void c_abi_func_create_aarch64(GenContext *context, FunctionSignature *signature)
{
	if (signature->failable)
	{
		signature->failable_abi_info = aarch64_classify_return_type(context, signature->rtype->type, signature->variadic);
		if (signature->rtype->type->type_kind != TYPE_VOID)
		{
			// The actual result travels through a pointer argument.
			signature->ret_abi_info = aarch64_classify_argument_type(context, type_get_ptr(type_lowering(signature->rtype->type)));
		}
	}
	else
	{
		signature->ret_abi_info = aarch64_classify_return_type(context, signature->rtype->type, signature->variadic);
	}
	Decl **params = signature->params;
	VECEACH(params, i)
	{
		params[i]->var.abi_info = aarch64_classify_argument_type(context, params[i]->type);
	}
}

View File

@@ -0,0 +1,58 @@
#pragma once
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_internal.h"
typedef enum
{
WIN32_CDECL,
WIN32_VEC,
WIN32_STD,
WIN32_SYS,
UNIX_CDECL,
WIN64_CC,
AMD64_CC,
} CABIKind;
typedef enum
{
BY_VAL,
BY_VAL_SKIP
} ByVal;
static inline ABIArgInfo *abi_arg_by_reg_attr(ABIArgInfo *info);
size_t abi_arg_expanded_size(ABIArgInfo *type_info, Type *type);
bool abi_arg_is_indirect(ABIArgInfo *info);
ABIArgInfo *abi_arg_new(ABIKind kind);
ABIArgInfo *abi_arg_new_direct_pair(AbiType *low_type, AbiType *high_type);
ABIArgInfo *abi_arg_new_direct(void);
ABIArgInfo *abi_arg_new_direct_int_ext(Type *type_to_extend);
ABIArgInfo *abi_arg_new_direct_coerce(AbiType *target_type);
ABIArgInfo *abi_arg_new_expand_padded(Type *padding);
ABIArgInfo *abi_arg_new_indirect_realigned(unsigned alignment);
ABIArgInfo *abi_arg_new_indirect_by_val(void);
ABIArgInfo *abi_arg_new_indirect_not_by_val(void);
size_t abi_type_abi_alignment(AbiType *type);
bool abi_type_is_integer(AbiType *type);
bool abi_type_is_float(AbiType *type);
AbiType *abi_type_new_plain(Type *type);
AbiType *abi_type_new_int_bits(unsigned bits);
size_t abi_type_size(AbiType *type);
ABIArgInfo *c_abi_classify_argument_type_default(Type *type);
void c_abi_func_create_win64(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_x64(GenContext *context, FunctionSignature *signature);
void c_abi_func_create_aarch64(GenContext *context, FunctionSignature *signature);
// Implementation
static inline ABIArgInfo *abi_arg_by_reg_attr(ABIArgInfo *info)
{
info->attributes.by_reg = true;
return info;
}

View File

@@ -0,0 +1,198 @@
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
// Classify a type for the Win64 C ABI. 'is_vector'/'is_reg' select the
// vectorcall/regcall conventions, which can place homogeneous aggregates
// in SSE registers (tracked via context->abi.sse_registers).
ABIArgInfo *win64_classify(GenContext *context, Type *type, bool is_return, bool is_vector, bool is_reg)
{
	if (type->type_kind == TYPE_VOID) return abi_arg_new(ABI_ARG_IGNORE);
	// Lower enums etc.
	type = type_lowering(type);
	Type *base = NULL;
	unsigned elements = 0;
	if ((is_vector || is_reg) && type_is_homogenous_aggregate(type, &base, &elements))
	{
		if (is_reg)
		{
			// Enough registers? Then use direct/expand
			if (context->abi.sse_registers >= elements)
			{
				context->abi.sse_registers -= elements;
				// Direct if return / builtin / vector
				if (is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR)
				{
					return abi_arg_new(ABI_ARG_DIRECT_COERCE);
				}
				return abi_arg_new(ABI_ARG_EXPAND);
			}
			// Otherwise use indirect
			return abi_arg_new_indirect_not_by_val();
		}
		if (is_vector)
		{
			// Enough registers AND return / builtin / vector
			if (context->abi.sse_registers >= elements &&
			    (is_return || type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR))
			{
				context->abi.sse_registers -= elements;
				return abi_arg_new(ABI_ARG_DIRECT_COERCE);
			}
			if (is_return)
			{
				return abi_arg_new(ABI_ARG_INDIRECT);
			}
			// HVAs are handled later.
			if (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_VECTOR)
			{
				return abi_arg_new_indirect_not_by_val();
			}
			// => to main handling.
		}
	}
	size_t size = type_size(type);
	if (type_is_abi_aggregate(type))
	{
		// Not 1, 2, 4, 8? Pass indirect.
		if (size > 8 || !is_power_of_two(size))
		{
			return abi_arg_new_indirect_not_by_val();
		}
		// Coerce to integer.
		return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
	}
	if (type_is_builtin(type->type_kind))
	{
		switch (type->type_kind)
		{
			case TYPE_BOOL:
				// Bools are zero-extended to a full byte.
				return abi_arg_new_direct_int_ext(type_bool);
			case TYPE_U128:
			case TYPE_I128:
				// Pass by val since greater than 8 bytes.
				if (!is_return) return abi_arg_new_indirect_not_by_val();
				// Make i128 return in XMM0
				return abi_arg_new_direct_coerce(abi_type_new_plain(type_get_vector(type_long, 2)));
			default:
				break;
		}
	}
	if (size > 8)
	{
		return abi_arg_new_indirect_not_by_val();
	}
	return abi_arg_new(ABI_ARG_DIRECT_COERCE);
}
// Second pass for vectorcall: homogeneous vector aggregates (HVAs) that
// still fit in the remaining SSE registers are upgraded to direct-in-register.
// Returns the original info unchanged otherwise.
ABIArgInfo *win64_reclassify_hva_arg(GenContext *context, Type *type, ABIArgInfo *info)
{
	// Assumes vectorCall calling convention.
	Type *base = NULL;
	unsigned elements = 0;
	type = type_lowering(type);
	if (!type_is_builtin(type->type_kind) && type->type_kind != TYPE_VECTOR && type_is_homogenous_aggregate(type, &base, &elements))
	{
		if (context->abi.sse_registers >= elements)
		{
			context->abi.sse_registers -= elements;
			ABIArgInfo *new_info = abi_arg_new(ABI_ARG_DIRECT_COERCE);
			new_info->attributes.by_reg = true;
			return new_info;
		}
	}
	return info;
}
// Classify all parameters under the Win64 vectorcall convention.
// Only the first MaxParamVectorCallsAsReg parameters may use registers;
// a second pass then reclassifies HVAs into remaining SSE registers.
void win64_vector_call_args(GenContext *context, FunctionSignature *signature, bool is_vector, bool is_reg)
{
	static const unsigned MaxParamVectorCallsAsReg = 6;
	unsigned count = 0;
	Decl **params = signature->params;
	VECEACH(params, i)
	{
		Decl *param = params[i];
		if (count < MaxParamVectorCallsAsReg)
		{
			param->var.abi_info = win64_classify(context, param->type, false, is_vector, is_reg);
		}
		else
		{
			// Cannot be passed in registers pretend no registers.
			unsigned regs = context->abi.sse_registers;
			context->abi.sse_registers = 0;
			param->var.abi_info = win64_classify(context, param->type, false, is_vector, is_reg);
			context->abi.sse_registers = regs;
		}
		count++;
	}
	// Second pass: HVAs take whatever SSE registers are left.
	VECEACH(params, i)
	{
		Decl *param = params[i];
		param->var.abi_info = win64_reclassify_hva_arg(context, param->type, param->var.abi_info);
	}
}
// Lower a function signature for the Win64 C ABI.
// SSE register budgets differ between the return classification pass
// (vectorcall: 4) and the parameter pass (vectorcall: 6), so the budget
// is set up twice.
void c_abi_func_create_win64(GenContext *context, FunctionSignature *signature)
{
	// allow calling sysv?
	// Set up return registers.
	context->abi.int_registers = 0;
	bool is_reg_call = false;
	bool is_vector_call = false;
	switch (context->abi.call_convention)
	{
		case CALL_CONVENTION_VECTOR:
			context->abi.sse_registers = 4;
			is_vector_call = true;
			break;
		case CALL_CONVENTION_REGCALL:
			context->abi.sse_registers = 16;
			is_reg_call = true;
			break;
		default:
			context->abi.sse_registers = 0;
			break;
	}
	if (signature->failable)
	{
		// Failable functions return the error; the real result (if any)
		// is passed through an extra pointer argument.
		signature->failable_abi_info = win64_classify(context, type_error, true, is_vector_call, is_reg_call);
		if (signature->rtype->type->type_kind != TYPE_VOID)
		{
			signature->ret_abi_info = win64_classify(context, type_get_ptr(type_lowering(signature->rtype->type)), false, is_vector_call, is_reg_call);
		}
	}
	else
	{
		signature->ret_abi_info = win64_classify(context, signature->rtype->type, true, is_vector_call, is_reg_call);
	}
	// Set up parameter registers.
	switch (context->abi.call_convention)
	{
		case CALL_CONVENTION_VECTOR:
			context->abi.sse_registers = 6;
			is_vector_call = true;
			break;
		case CALL_CONVENTION_REGCALL:
			context->abi.sse_registers = 16;
			is_reg_call = true;
			break;
		default:
			context->abi.sse_registers = 0;
			break;
	}
	if (is_vector_call)
	{
		// Vectorcall needs its own two-pass parameter classification.
		win64_vector_call_args(context, signature, is_vector_call, is_reg_call);
		return;
	}
	Decl **params = signature->params;
	VECEACH(params, i)
	{
		params[i]->var.abi_info = win64_classify(context, params[i]->type, false, is_vector_call, is_reg_call);
	}
}

View File

@@ -0,0 +1,955 @@
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
typedef enum
{
UNNAMED,
NAMED
} NamedArgument;
typedef struct
{
unsigned sse_registers;
unsigned int_registers;
} Registers;
// Try to consume 'used' registers from 'available'.
// On success the counts are deducted and true is returned;
// on failure 'available' is left untouched.
bool try_use_registers(Registers *available, Registers *used)
{
	bool enough = available->sse_registers >= used->sse_registers
	              && available->int_registers >= used->int_registers;
	if (!enough) return false;
	available->int_registers -= used->int_registers;
	available->sse_registers -= used->sse_registers;
	return true;
}
typedef enum
{
// No not change ordering.
CLASS_NO_CLASS,
CLASS_MEMORY,
CLASS_INTEGER,
CLASS_SSE,
CLASS_SSEUP,
} X64Class;
static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs, Registers *needed_registers, NamedArgument is_named);
static bool x64_type_is_structure(Type *type);
// Return a type indirectly: aggregates via sret, small integers promoted,
// everything else coerced direct.
ABIArgInfo *x64_indirect_return_result(Type *type)
{
	if (type_is_abi_aggregate(type))
	{
		return abi_arg_new(ABI_ARG_INDIRECT);
	}
	type = type_lowering(type);
	if (type_is_promotable_integer(type))
	{
		return abi_arg_new_direct_int_ext(type);
	}
	return abi_arg_new(ABI_ARG_DIRECT_COERCE);
}
// Largest native vector register size (in bytes) for the target's AVX level:
// SSE = 16, AVX = 32, AVX-512 = 64.
static size_t x64_native_vector_size_for_avx(void)
{
	switch (build_target.x64.avx_level)
	{
		case AVX_512:
			return 64;
		case AVX:
			return 32;
		case AVX_NONE:
			return 16;
	}
	UNREACHABLE
}
// True when a vector type cannot be passed in registers on x64:
// 64 bits or smaller, larger than the native AVX width, or an i128/u128
// element vector on targets that pass those in memory.
static bool x64_type_is_illegal_vector(Type *type)
{
	// Only check vectors.
	if (type->type_kind != TYPE_VECTOR) return false;
	unsigned size = type_size(type);
	// Less than 64 bits or larger than the avx native size => not allowed.
	if (size <= 8 || size > x64_native_vector_size_for_avx()) return true;
	// If we pass i128 in mem, then check for that.
	if (build_target.x64.pass_int128_vector_in_mem)
	{
		// Illegal if i128/u128
		TypeKind kind = type->vector.base->type_kind;
		return kind == TYPE_I128 || kind == TYPE_U128;
	}
	// Otherwise fine!
	return true;
}
// Pass an argument in memory for the System V x64 ABI, mirroring Clang's
// getIndirectResult: scalars still go direct, and when integer registers
// are exhausted, small well-aligned aggregates are coerced to an integer.
ABIArgInfo *x64_indirect_result(Type *type, unsigned free_int_regs)
{
	// If this is a scalar LLVM value then assume LLVM will pass it in the right
	// place naturally.
	//
	// This assumption is optimistic, as there could be free registers available
	// when we need to pass this argument in memory, and LLVM could try to pass
	// the argument in the free register. This does not seem to happen currently,
	// but this code would be much safer if we could mark the argument with
	// 'onstack'. See PR12193.
	type = type_lowering(type);
	if (!type_is_abi_aggregate(type) && !x64_type_is_illegal_vector(type))
	{
		if (type_is_promotable_integer(type))
		{
			return abi_arg_new_direct_int_ext(type);
		}
		// No change, just put it on the stack.
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	// The byval alignment
	unsigned align = type_abi_alignment(type);
	// Pass as arguments if there are no more free int regs
	// (if 'onstack' appears, change this code)
	if (!free_int_regs)
	{
		unsigned size = type_size(type);
		if (align == 8 && size <= 8)
		{
			return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
		}
	}
	if (align < 8)
	{
		// byval requires at least 8-byte alignment on x64.
		return abi_arg_new_indirect_realigned(8);
	}
	return abi_arg_new(ABI_ARG_INDIRECT);
}
// Recursively classify a struct-like type for regcall, accumulating the
// int/SSE registers it would need. If any member must go indirect, the
// whole struct is returned indirectly and *needed_registers is cleared.
ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *needed_registers)
{
	// Two-pointer builtins (error union, string, subarray) take two int registers.
	if (type->type_kind == TYPE_ERR_UNION || type->type_kind == TYPE_STRING || type->type_kind == TYPE_SUBARRAY)
	{
		needed_registers->int_registers += 2;
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	// Union, struct, err type handled =>
	assert(type->type_kind == TYPE_STRUCT || type->type_kind == TYPE_UNION || type->type_kind == TYPE_ERRTYPE);
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		Type *member_type = type_lowering(members[i]->type->canonical);
		ABIArgInfo *member_info;
		Registers temp_needed_registers = {};
		if (x64_type_is_structure(member_type))
		{
			// Nested struct: recurse.
			member_info = x64_classify_reg_call_struct_type_check(member_type, &temp_needed_registers);
		}
		else
		{
			// Leaf member: classify with unlimited int registers.
			member_info = x64_classify_argument_type(member_type, (unsigned)-1, &temp_needed_registers, NAMED);
		}
		if (abi_arg_is_indirect(member_info))
		{
			// Any indirect member forces the whole struct indirect.
			*needed_registers = (Registers) {};
			return x64_indirect_return_result(type);
		}
		needed_registers->sse_registers += temp_needed_registers.sse_registers;
		needed_registers->int_registers += temp_needed_registers.int_registers;
	}
	// Check this!
	return abi_arg_new(ABI_ARG_DIRECT_COERCE);
}
/**
 * Regcall convention: classify a struct-like return type, consuming the
 * registers it needs, or falling back to an indirect return if the available
 * registers do not suffice.
 */
ABIArgInfo *x64_classify_reg_call_struct_type(Type *return_type, Registers *available_registers)
{
	// Tally how many registers this struct would consume.
	Registers required = {};
	ABIArgInfo *classification = x64_classify_reg_call_struct_type_check(return_type, &required);
	// Not enough free registers? Return indirectly instead.
	if (!try_use_registers(available_registers, &required)) return x64_indirect_return_result(return_type);
	return classification;
}
static void x64_classify(Type *type, size_t offset_base, X64Class *lo_class, X64Class *hi_class, NamedArgument named);
/**
 * Merge two eightbyte classes (AMD64 ABI 3.2.3p2 merge rules):
 * 1. equal classes merge to themselves
 * 2. NO_CLASS merges to the other class
 * 3. MEMORY dominates everything
 * 4. INTEGER dominates the SSE classes
 * 5. anything else merges to SSE
 */
static X64Class x64_merge(X64Class accum, X64Class field)
{
	// The accumulator is never MEMORY here; callers bail out before that.
	assert(accum != CLASS_MEMORY);
	if (field == accum) return field;
	// Order the pair so that 'accum' holds the lower class value.
	if (field < accum)
	{
		X64Class swap = accum;
		accum = field;
		field = swap;
	}
	switch (accum)
	{
		case CLASS_NO_CLASS:
			// NO_CLASS yields to whatever the field is.
			return field;
		case CLASS_MEMORY:
			return CLASS_MEMORY;
		case CLASS_INTEGER:
			// The other class is neither MEMORY nor NO_CLASS at this point.
			return CLASS_INTEGER;
		case CLASS_SSEUP:
		case CLASS_SSE:
			// Only SSE-family classes remain possible.
			return CLASS_SSE;
	}
	UNREACHABLE
}
/**
 * Post-merge cleanup of a (lo, hi) eightbyte classification pair
 * (AMD64 ABI 3.2.3p5).
 * @param size total size of the aggregate in bytes
 */
void x64_classify_post_merge(size_t size, X64Class *lo_class, X64Class *hi_class)
{
	// If one is MEM => both is mem
	// If X87UP is not before X87 => mem (x87 classes are not modelled here)
	// If size > 16 && first isn't SSE or any other is not SSEUP => mem
	// If SSEUP is not preceeded by SSE/SSEUP => convert to SSE.
	if (*hi_class == CLASS_MEMORY) goto DEFAULT_TO_MEMORY;
	if (size > 16 && (*lo_class != CLASS_SSE || *hi_class != CLASS_SSEUP)) goto DEFAULT_TO_MEMORY;
	if (*hi_class == CLASS_SSEUP && *lo_class != CLASS_SSE && *lo_class != CLASS_SSEUP)
	{
		// TODO check this
		*hi_class = CLASS_SSE;
	}
	return;
	DEFAULT_TO_MEMORY:
	*lo_class = CLASS_MEMORY;
}
/**
 * Classify a struct/union/errtype by merging the classification of each
 * member into the (lo, hi) pair (AMD64 ABI 3.2.3p2 field iteration).
 * The caller has pre-set *current to CLASS_MEMORY, so the size > 64 early
 * return leaves the aggregate classified as memory.
 */
void x64_classify_struct_union(Type *type, size_t offset_base, X64Class *current, X64Class *lo_class, X64Class *hi_class, NamedArgument named_arg)
{
	size_t size = type_size(type);
	// 64 byte max. (returning keeps the caller's MEMORY classification)
	if (size > 64) return;
	// Re-classify
	*current = CLASS_NO_CLASS;
	bool is_union = type->type_kind == TYPE_UNION;
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		Decl *member = members[i];
		size_t offset = offset_base + member->offset;
		// The only case a 256-bit or a 512-bit wide vector could be used is when
		// the struct contains a single 256-bit or 512-bit element. Early check
		// and fallback to memory.
		if (size > 16 &&
		    ((!is_union && size != type_size(member->type))
		     || size > x64_native_vector_size_for_avx()))
		{
			*lo_class = CLASS_MEMORY;
			x64_classify_post_merge(size, lo_class, hi_class);
			return;
		}
		// Not aligned? Unaligned fields force memory classification.
		if (offset % type_abi_alignment(member->type))
		{
			*lo_class = CLASS_MEMORY;
			x64_classify_post_merge(size, lo_class, hi_class);
			return;
		}
		X64Class field_lo;
		X64Class field_hi;
		x64_classify(member->type, offset, &field_lo, &field_hi, named_arg);
		*lo_class = x64_merge(*lo_class, field_lo);
		*hi_class = x64_merge(*hi_class, field_hi);
		// Once memory wins there is no point continuing.
		if (*lo_class == CLASS_MEMORY || *hi_class == CLASS_MEMORY) break;
	}
	x64_classify_post_merge(size, lo_class, hi_class);
}
/**
 * Classify a fixed array by merging the classification of each element into
 * the (lo, hi) pair. The caller pre-set *current to CLASS_MEMORY, so the
 * size > 64 early return leaves the array classified as memory.
 */
void x64_classify_array(Type *type, size_t offset_base, X64Class *current, X64Class *lo_class, X64Class *hi_class, NamedArgument named_arg)
{
	size_t size = type_size(type);
	Type *element = type->array.base;
	size_t element_size = type_size(element);
	// Bigger than 64 bytes => MEM
	if (size > 64) return;
	// A misaligned start forces memory classification.
	if (offset_base % type_abi_alignment(element))
	{
		*lo_class = CLASS_MEMORY;
		x64_classify_post_merge(size, lo_class, hi_class);
		return;
	}
	// Re-classify
	*current = CLASS_NO_CLASS;
	// The only case a 256-bit or a 512-bit wide vector could be used is when
	// the struct contains a single 256-bit or 512-bit element. Early check
	// and fallback to memory.
	if (size > 16 && (size != type_size(element) || size > x64_native_vector_size_for_avx()))
	{
		*lo_class = CLASS_MEMORY;
		return;
	}
	// Classify each element, advancing the offset as we go.
	size_t offset = offset_base;
	for (size_t i = 0; i < type->array.len; i++)
	{
		X64Class field_lo;
		X64Class field_hi;
		x64_classify(element, offset, &field_lo, &field_hi, named_arg);
		// Fix: advance the offset that is actually passed to x64_classify.
		// Previously 'offset_base' was incremented instead, so every element
		// was classified at the same (initial) offset.
		offset += element_size;
		*lo_class = x64_merge(*lo_class, field_lo);
		*hi_class = x64_merge(*hi_class, field_hi);
		if (*lo_class == CLASS_MEMORY || *hi_class == CLASS_MEMORY) break;
	}
	x64_classify_post_merge(size, lo_class, hi_class);
	assert(*hi_class != CLASS_SSEUP || *lo_class == CLASS_SSE);
}
/**
 * Classify a vector type into eightbyte classes (AMD64 ABI + clang's
 * x86-64 vector rules): tiny vectors go as INTEGER, 8-byte vectors as SSE,
 * 16-byte (and, for named args, up to AVX-native size) as SSE/SSEUP, and
 * everything else falls through to memory.
 */
void x64_classify_vector(Type *type, size_t offset_base, X64Class *current, X64Class *lo_class, X64Class *hi_class,
                         NamedArgument named_arg)
{
	unsigned size = type_size(type);
	// Pass as int
	if (size == 1 || size == 2 || size == 4)
	{
		*current = CLASS_INTEGER;
		// Check boundary crossing
		size_t lo = offset_base / 8;
		size_t hi = (offset_base + size - 1) / 8;
		// If it crosses boundary, split it.
		if (hi != lo)
		{
			*hi_class = *lo_class;
		}
		return;
	}
	if (size == 8)
	{
		Type *element = type->vector.base;
		// 1 x double passed in memory (by gcc)
		if (element->type_kind == TYPE_F64) return;
		// 1 x long long is passed different on older clang and
		// gcc, we pick SSE which is the GCC and later Clang standard.
		*current = CLASS_SSE;
		// Split if crossing boundary.
		if (offset_base && offset_base != 8)
		{
			*hi_class = *lo_class;
		}
		return;
	}
	// 16 bytes is always SSE/SSEUP; larger AVX-native vectors only for
	// named arguments (unnamed/vararg large vectors go in memory).
	// Fix: was 'size == 16 || named_arg || size <= ...', which let every named
	// argument take this path regardless of size. Assumes NAMED is nonzero —
	// TODO confirm against the NamedArgument enum declaration.
	if (size == 16 || (named_arg && size <= x64_native_vector_size_for_avx()))
	{
		// Fix: only vectors of i128/u128 elements are memory-passed on targets
		// with pass_int128_vector_in_mem, matching x64_type_is_illegal_vector.
		// Previously every vector was sent to memory on such targets.
		if (build_target.x64.pass_int128_vector_in_mem)
		{
			TypeKind kind = type->vector.base->type_kind;
			if (kind == TYPE_I128 || kind == TYPE_U128) return;
		}
		*lo_class = CLASS_SSE;
		*hi_class = CLASS_SSEUP;
	}
	// Default pass by mem
}
/**
 * Classify a complex number type: integer-element complexes go INTEGER,
 * float/double go SSE, f128 goes to memory. The pair is split over two
 * eightbytes if the real/imag parts land in different eightbytes.
 */
void x64_classify_complex(Type *type, size_t offset_base, X64Class *current, X64Class *lo_class, X64Class *hi_class)
{
	Type *element = type->complex;
	size_t element_size = type_size(element);
	// Classify based on the *element* type of the complex number.
	// Fix: this previously switched on type->type_kind, which is always
	// TYPE_COMPLEX here — every case was dead and UNREACHABLE would fire.
	switch (element->type_kind)
	{
		case TYPE_I8:
		case TYPE_I16:
		case TYPE_I32:
		case TYPE_I64:
		case TYPE_U8:
		case TYPE_U16:
		case TYPE_U32:
		case TYPE_U64:
			*current = CLASS_INTEGER;
			break;
		case TYPE_I128:
		case TYPE_U128:
			// Each 128-bit part fills its own eightbyte pair.
			*lo_class = *hi_class = CLASS_INTEGER;
			break;
		case TYPE_F16:
			TODO
		case TYPE_F32:
			*current = CLASS_SSE;
			break;
		case TYPE_F64:
			// real + imag each take an eightbyte.
			*lo_class = *hi_class = CLASS_SSE;
			break;
		case TYPE_F128:
			*current = CLASS_MEMORY;
			break;
		default:
			UNREACHABLE
	}
	// Eightbyte index of the real and the imaginary part.
	size_t real = offset_base / 8;
	size_t imag = (offset_base + element_size) / 8;
	// If it crosses boundary, split it.
	if (*hi_class == CLASS_NO_CLASS && real != imag)
	{
		*hi_class = *lo_class;
	}
}
/**
 * Find the struct member containing the given byte offset, or NULL if the
 * offset lies beyond the type's size.
 */
Decl *x64_get_member_at_offset(Decl *decl, unsigned offset)
{
	// Offset past the end of the type => no member there.
	if (type_size(decl->type) <= offset) return NULL;
	// Scan members in declaration order, remembering the last one that
	// starts at or before the requested offset.
	Decl *found = NULL;
	Decl **members = decl->strukt.members;
	VECEACH(members, i)
	{
		Decl *candidate = members[i];
		if (candidate->offset > offset) break;
		found = candidate;
	}
	// The first member starts at offset 0, so something must have matched.
	assert(found);
	return found;
}
/**
 * Top-level AMD64 classification: compute the (lo, hi) eightbyte classes for
 * a type at the given offset (AMD64 ABI 3.2.3p2).
 * Both classes start as NO_CLASS; *current points at the eightbyte the
 * offset falls into and defaults to MEMORY, so aggregate helpers that return
 * early (e.g. size > 64) leave a MEMORY classification behind.
 */
static void x64_classify(Type *type, size_t offset_base, X64Class *lo_class, X64Class *hi_class, NamedArgument named)
{
	*lo_class = CLASS_NO_CLASS;
	*hi_class = CLASS_NO_CLASS;
	// Pick the eightbyte this offset falls into; default it to MEMORY.
	X64Class *current = offset_base < 8 ? lo_class : hi_class;
	*current = CLASS_MEMORY;
	type = type_lowering(type);
	switch (type->type_kind)
	{
		case TYPE_POISONED:
		case TYPE_ENUM:
		case TYPE_TYPEDEF:
		case TYPE_FXX:
		case TYPE_IXX:
		case TYPE_TYPEID:
		case TYPE_FUNC:
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
			// These cannot survive type_lowering().
			UNREACHABLE
		case TYPE_VOID:
			*current = CLASS_NO_CLASS;
			break;
		case TYPE_I128:
		case TYPE_U128:
		case TYPE_ERR_UNION:
		case TYPE_SUBARRAY:
			// 16-byte types fill both eightbytes with INTEGER.
			*lo_class = CLASS_INTEGER;
			*hi_class = CLASS_INTEGER;
			break;
		case TYPE_BOOL:
		case TYPE_U8:
		case TYPE_U16:
		case TYPE_U32:
		case TYPE_U64:
		case TYPE_I8:
		case TYPE_I16:
		case TYPE_I32:
		case TYPE_I64:
			*current = CLASS_INTEGER;
			break;
		case TYPE_F16:
			TODO
		case TYPE_F32:
		case TYPE_F64:
			*current = CLASS_SSE;
			break;
		case TYPE_F128:
			*lo_class = CLASS_SSE;
			*hi_class = CLASS_SSEUP;
			break;
		case TYPE_VARARRAY:
		case TYPE_POINTER:
			*current = CLASS_INTEGER;
			break;
		case TYPE_STRUCT:
		case TYPE_UNION:
		case TYPE_ERRTYPE:
			x64_classify_struct_union(type, offset_base, current, lo_class, hi_class, named);
			break;
		case TYPE_STRING:
			TODO
		case TYPE_ARRAY:
			x64_classify_array(type, offset_base, current, lo_class, hi_class, named);
			break;
		case TYPE_VECTOR:
			x64_classify_vector(type, offset_base, current, lo_class, hi_class, named);
			break;
		case TYPE_COMPLEX:
			x64_classify_complex(type, offset_base, current, lo_class, hi_class);
			break;
	}
}
/**
 * Return true if the byte range [start, end) of the given type contains only
 * padding (no user data). Used to decide whether a trailing partial eightbyte
 * can be coerced to a smaller type.
 */
bool x64_bits_contain_no_user_data(Type *type, unsigned start, unsigned end)
{
	// If the bytes being queried are off the end of the type, there is no user
	// data hiding here. This handles analysis of builtins, vectors and other
	// types that don't contain interesting padding.
	size_t size = type_size(type);
	if (size <= start) return true;
	if (type->type_kind == TYPE_ARRAY)
	{
		// Check each element to see if the element overlaps with the queried range.
		size_t element_size = type_size(type->array.base);
		for (unsigned i = 0; i < type->array.len; i++)
		{
			// If the element is after the span we care about, then we're done..
			size_t offset = i * element_size;
			if (offset >= end) break;
			unsigned element_start = offset < start ? start - offset : 0;
			if (!x64_bits_contain_no_user_data(type->array.base, element_start, end - offset)) return false;
		}
		// No overlap
		return true;
	}
	if (type->type_kind == TYPE_STRUCT || type->type_kind == TYPE_ERRTYPE || type->type_kind == TYPE_UNION)
	{
		Decl **members = type->decl->strukt.members;
		VECEACH(members, i)
		{
			Decl *member = members[i];
			unsigned offset = member->offset;
			// Fix: use >= like the array branch above. A field starting exactly
			// at 'end' lies outside the queried range; with '>' it was recursed
			// into with an empty range and falsely reported as user data.
			if (offset >= end) break;
			unsigned field_start = offset < start ? start - offset : 0;
			if (!x64_bits_contain_no_user_data(member->type, field_start, end - offset)) return false;
		}
		// No overlap
		return true;
	}
	// Any other type overlapping the range counts as user data.
	return false;
}
/**
 * Return true if the type has a 32-bit float at exactly the given byte
 * offset, recursing through structs and arrays.
 */
bool x64_contains_float_at_offset(Type *type, unsigned offset)
{
	// A float at offset 0 is the base case.
	if (offset == 0 && type->type_kind == TYPE_F32) return true;
	switch (type->type_kind)
	{
		case TYPE_ERRTYPE:
		case TYPE_STRUCT:
		{
			// Recurse into the member covering this offset.
			Decl *member = x64_get_member_at_offset(type->decl, offset);
			return x64_contains_float_at_offset(member->type, offset - member->offset);
		}
		case TYPE_ARRAY:
		{
			// Recurse into the element, using the offset within the element.
			Type *element_type = type->array.base;
			unsigned element_size = type_size(element_type);
			return x64_contains_float_at_offset(element_type, offset % element_size);
		}
		default:
			return false;
	}
}
/**
 * Pick the SSE eightbyte representation at an offset: float if the upper
 * four bytes are padding, <2 x float> if two floats sit side by side,
 * otherwise double.
 */
AbiType *x64_get_sse_type_at_offset(Type *type, unsigned ir_offset, Type *source_type, unsigned source_offset)
{
	// The only three choices we have are either double, <2 x float>, or float. We
	// pass as float if the last 4 bytes is just padding. This happens for
	// structs that contain 3 floats.
	if (x64_bits_contain_no_user_data(source_type, source_offset + 4, source_offset + 8)) return abi_type_new_plain(type_float);
	// We want to pass as <2 x float> if the LLVM IR type contains a float at
	// offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
	// case.
	if (x64_contains_float_at_offset(type, ir_offset) &&
	    x64_contains_float_at_offset(type, ir_offset + 4))
	{
		return abi_type_new_plain(type_get_vector(type_float, 2));
	}
	return abi_type_new_plain(type_double);
}
/**
 * Pick the integer eightbyte representation for the data at 'offset' inside
 * 'type'. Prefers the natural type when the whole eightbyte (or its tail
 * padding) allows it, recursing through structs and arrays; otherwise falls
 * back to an integer sized by the remaining bytes of the source type.
 */
AbiType *x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_type, unsigned source_offset)
{
	switch (type->type_kind)
	{
		case TYPE_U64:
		case TYPE_I64:
		case TYPE_VARARRAY:
		case TYPE_POINTER:
			// Full-eightbyte types can be used directly at offset 0.
			if (!offset) return abi_type_new_plain(type);
			break;
		case TYPE_BOOL:
		case TYPE_U8:
		case TYPE_I8:
		case TYPE_I16:
		case TYPE_U16:
		case TYPE_U32:
		case TYPE_I32:
			if (!offset) break;
			// Use the natural type if the rest of the eightbyte is padding.
			if (x64_bits_contain_no_user_data(source_type,
			                                  source_offset + type_size(type),
			                                  source_offset + 8))
			{
				return abi_type_new_plain(type);
			}
			break;
		case TYPE_STRUCT:
		case TYPE_ERRTYPE:
		{
			// Recurse into the member at the offset.
			Decl *member = x64_get_member_at_offset(type->decl, offset);
			if (member)
			{
				return x64_get_int_type_at_offset(member->type, offset - member->offset, source_type, source_offset);
			}
			break;
		}
		case TYPE_ERR_UNION:
			// Two pointer-sized slots.
			if (offset < 16) return abi_type_new_plain(type_usize);
			break;
		case TYPE_SUBARRAY:
		case TYPE_STRING:
			// len (usize) followed by pointer.
			if (offset < 8) return abi_type_new_plain(type_usize);
			if (offset < 16) return abi_type_new_plain(type_voidptr);
			break;
		case TYPE_ARRAY:
		{
			// Recurse into the element containing the offset.
			Type *element = type->array.base;
			size_t element_size = type_size(element);
			size_t element_offset = (offset / element_size) * element_size;
			return x64_get_int_type_at_offset(element, offset - element_offset, source_type, source_offset);
		}
		case TYPE_POISONED:
		case TYPE_VOID:
		case TYPE_FXX:
		case TYPE_IXX:
		case TYPE_TYPEID:
		case TYPE_ENUM:
		case TYPE_FUNC:
		case TYPE_TYPEDEF:
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
			// These never reach integer classification.
			UNREACHABLE
		case TYPE_I128:
		case TYPE_U128:
		case TYPE_F16:
		case TYPE_F32:
		case TYPE_F64:
		case TYPE_F128:
		case TYPE_UNION:
		case TYPE_VECTOR:
		case TYPE_COMPLEX:
			break;
	}
	// Fallback: an integer covering the remaining bytes of the source type
	// (capped at one eightbyte).
	size_t size = type_size(source_type);
	assert(size != source_offset);
	if (size - source_offset > 8) return abi_type_new_plain(type_ulong);
	return abi_type_new_int_bits((size - source_offset) * 8);
}
/**
 * This is only called on SSE.
 * Produce the vector type used to pass an SSE-classified aggregate:
 * unwraps single-vector wrappers, keeps f128 as-is, and otherwise builds a
 * double-vector of the right total size.
 */
static AbiType *x64_get_byte_vector_type(Type *type)
{
	// Wrapper structs/arrays that only contain vectors are passed just like
	// vectors; strip them off if present.
	Type *inner_type = type_find_single_struct_element(type);
	if (inner_type) type = inner_type;
	type = type_lowering(type);
	// If vector
	if (type->type_kind == TYPE_VECTOR)
	{
		Type *element = type->vector.base->canonical;
		if (build_target.x64.pass_int128_vector_in_mem && type_is_int128(element))
		{
			// Convert to u64
			return abi_type_new_plain(type_get_vector(type_ulong, type_size(type) / 8));
		}
		return abi_type_new_plain(type);
	}
	// f128 occupies a full SSE register pair already.
	if (type->type_kind == TYPE_F128) return abi_type_new_plain(type);
	unsigned size = type_size(type);
	assert(size == 16 || size == 32 || size == 64);
	// Return a vector type based on the size.
	return abi_type_new_plain(type_get_vector(type_double, size / 8));
}
/**
 * Combine a low and high eightbyte type into a direct {low, high} pair.
 * The high part must start exactly at byte 8.
 */
static ABIArgInfo *x64_get_argument_pair_return(AbiType *low_type, AbiType *high_type)
{
	unsigned low_size = abi_type_size(low_type);
	// NOTE(review): hi_start is only read by the assert below, so release
	// builds may warn about it being unused.
	unsigned hi_start = aligned_offset(low_size, abi_type_abi_alignment(high_type));
	assert(hi_start == 8 && "Expected aligned with C-style structs.");
	return abi_arg_new_direct_pair(low_type, high_type);
}
/**
 * Classify a return value per the AMD64 ABI (3.2.3p4): classify into
 * (lo, hi) eightbytes, then map the classes to registers or a hidden sret
 * argument.
 */
ABIArgInfo *x64_classify_return(Type *return_type)
{
	// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
	// classification algorithm.
	X64Class hi_class;
	X64Class lo_class;
	x64_classify(return_type, 0, &lo_class, &hi_class, NAMED);
	// Invariants
	assert(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY);
	assert(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE);
	AbiType *result_type = NULL;
	switch (lo_class)
	{
		case CLASS_NO_CLASS:
			if (hi_class == CLASS_NO_CLASS)
			{
				return abi_arg_new(ABI_ARG_IGNORE);
			}
			// If low part is padding, keep type null
			// NOTE(review): the hi switch below asserts lo != NO_CLASS for
			// INTEGER/SSE, which conflicts with reaching it via this path
			// (only possible with C++-style leading padding) — verify.
			assert(hi_class == CLASS_SSE || hi_class == CLASS_INTEGER);
			break;
		case CLASS_SSEUP:
			UNREACHABLE
		case CLASS_MEMORY:
			// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
			// hidden argument.
			return x64_indirect_return_result(return_type);
		case CLASS_INTEGER:
			// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
			// available register of the sequence %rax, %rdx is used.
			result_type = x64_get_int_type_at_offset(return_type, 0, return_type, 0);
			if (hi_class == CLASS_NO_CLASS && abi_type_is_integer(result_type))
			{
				// Small integers must be extended in the return register.
				if (type_is_promotable_integer(return_type))
				{
					return abi_arg_new_direct_int_ext(return_type);
				}
			}
			break;
		case CLASS_SSE:
			result_type = x64_get_sse_type_at_offset(return_type, 0, return_type, 0);
			break;
	}
	AbiType *high_part = NULL;
	switch (hi_class)
	{
		case CLASS_MEMORY:
		case CLASS_NO_CLASS:
			// Previously handled.
			break;
		case CLASS_INTEGER:
			assert(lo_class != CLASS_NO_CLASS);
			high_part = x64_get_int_type_at_offset(return_type, 8, return_type, 8);
			break;
		case CLASS_SSE:
			assert(lo_class != CLASS_NO_CLASS);
			high_part = x64_get_sse_type_at_offset(return_type, 8, return_type, 8);
			break;
		case CLASS_SSEUP:
			// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
			// is passed in the next available eightbyte chunk if the last used
			// vector register.
			//
			// SSEUP should always be preceded by SSE, just widen.
			assert(lo_class == CLASS_SSE && "Unexpected SSEUp classification.");
			result_type = x64_get_byte_vector_type(return_type);
			break;
	}
	// If a high part was specified, merge it together with the low part. It is
	// known to pass in the high eightbyte of the result. We do this by forming a
	// first class struct aggregate with the high and low part: {low, high}
	if (high_part) return x64_get_argument_pair_return(result_type, high_part);
	// If the ABI type is exactly the original type, no coercion is needed.
	if (result_type->kind == ABI_TYPE_PLAIN &&
	    return_type->canonical == result_type->type->canonical)
	{
		return abi_arg_new_direct();
	}
	return abi_arg_new_direct_coerce(result_type);
}
/**
 * Classify a single argument: compute its (lo, hi) eightbyte classes, update
 * the needed register counts, and build the ABIArgInfo.
 * @param free_int_regs remaining integer registers (for the indirect fallback)
 * @param needed_registers out: registers this argument would consume
 * @param is_named whether the argument is a named (non-vararg) parameter
 */
static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs, Registers *needed_registers, NamedArgument is_named)
{
	X64Class hi_class;
	X64Class lo_class;
	// Fix: forward the caller's namedness instead of hardcoding NAMED —
	// 'is_named' was previously ignored, and it matters for large vector
	// classification.
	x64_classify(type, 0, &lo_class, &hi_class, is_named);
	// Invariants
	assert(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY);
	assert(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE);
	AbiType *result_type = NULL;
	*needed_registers = (Registers) { 0, 0 };
	switch (lo_class)
	{
		case CLASS_NO_CLASS:
			// Only C++ would leave 8 bytes of padding, so we can ignore that case.
			assert(hi_class == CLASS_NO_CLASS);
			return abi_arg_new(ABI_ARG_IGNORE);
		case CLASS_SSEUP:
			UNREACHABLE
		case CLASS_MEMORY:
			return x64_indirect_result(type, free_int_regs);
		case CLASS_INTEGER:
			needed_registers->int_registers++;
			result_type = x64_get_int_type_at_offset(type, 0, type, 0);
			if (hi_class == CLASS_NO_CLASS && abi_type_is_integer(result_type))
			{
				// Small integers must be extended in their register.
				if (type_is_promotable_integer(type))
				{
					return abi_arg_new_direct_int_ext(type);
				}
			}
			break;
		case CLASS_SSE:
			result_type = x64_get_sse_type_at_offset(type, 0, type, 0);
			needed_registers->sse_registers++;
			break;
	}
	AbiType *high_part = NULL;
	switch (hi_class)
	{
		case CLASS_MEMORY:
			UNREACHABLE
		case CLASS_NO_CLASS:
			break;
		case CLASS_INTEGER:
			needed_registers->int_registers++;
			high_part = x64_get_int_type_at_offset(type, 8, type, 8);
			// Return directly into high part.
			assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed");
			break;
		case CLASS_SSE:
			needed_registers->sse_registers++;
			high_part = x64_get_sse_type_at_offset(type, 8, type, 8);
			assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed");
			break;
		case CLASS_SSEUP:
			assert(lo_class == CLASS_SSE && "Unexpected SSEUp classification.");
			result_type = x64_get_byte_vector_type(type);
			break;
	}
	// If a high part was specified, merge it together with the low part. It is
	// known to pass in the high eightbyte of the result. We do this by forming a
	// first class struct aggregate with the high and low part: {low, high}
	if (high_part) return x64_get_argument_pair_return(result_type, high_part);
	if (result_type->kind == ABI_TYPE_PLAIN)
	{
		Type *result = result_type->type->canonical;
		type = type->canonical;
		// Same canonical type, or same-size integer types => no coercion needed.
		if (type == result) return abi_arg_new_direct();
		if (type_is_integer(type) && type_is_integer(result) && type->builtin.bytesize == result->builtin.bytesize)
		{
			return abi_arg_new_direct();
		}
		// (A third check that duplicated the 'type == result' comparison was
		// removed as dead code.)
	}
	return abi_arg_new_direct_coerce(result_type);
}
/**
 * Is this type treated as a "structure" for regcall classification?
 * Covers structs, error types, and the fat-pointer builtins (error union,
 * string, subarray).
 */
bool x64_type_is_structure(Type *type)
{
	TypeKind kind = type->type_kind;
	return kind == TYPE_STRUCT || kind == TYPE_ERRTYPE || kind == TYPE_ERR_UNION
	       || kind == TYPE_STRING || kind == TYPE_SUBARRAY;
}
/**
 * Classify a function's return type, dispatching struct-like regcall returns
 * to the dedicated regcall path.
 */
static ABIArgInfo *x64_classify_return_type(Type *ret_type, Registers *registers, bool is_regcall)
{
	// Lower to the ABI representation first.
	Type *lowered = type_lowering(ret_type);
	// Struct-like regcall returns use the register-tallying path.
	return (is_regcall && x64_type_is_structure(lowered))
	       ? x64_classify_reg_call_struct_type(lowered, registers)
	       : x64_classify_return(lowered);
}
/**
 * Classify a single parameter, consuming registers from the available pool.
 * Falls back to an indirect argument if the required registers are not free.
 */
static ABIArgInfo *x64_classify_parameter(Type *type, Registers *available_registers, bool is_regcall)
{
	// TODO check "NAMED" — varargs should classify as UNNAMED here.
	NamedArgument arg = NAMED;
	Registers needed_registers = {};
	type = type_lowering(type);
	ABIArgInfo *info;
	// If this is a reg call, use the struct type check.
	if (is_regcall && (type_is_structlike(type) || type->type_kind == TYPE_UNION))
	{
		info = x64_classify_reg_call_struct_type_check(type, &needed_registers);
	}
	else
	{
		info = x64_classify_argument_type(type, available_registers->int_registers, &needed_registers, arg);
	}
	// Not enough registers left => pass indirectly instead.
	if (!try_use_registers(available_registers, &needed_registers))
	{
		// use a register?
		info = x64_indirect_result(type, available_registers->int_registers);
	}
	return info;
}
/**
 * Build the full x64 C ABI lowering for a function signature: classify the
 * (possibly failable) return and every parameter, tracking register usage.
 */
void c_abi_func_create_x64(GenContext *context, FunctionSignature *signature)
{
	// TODO 32 bit pointers
	// TODO allow override to get win64
	bool is_regcall = signature->convention == CALL_CONVENTION_REGCALL;
	context->abi.call_convention = signature->convention;
	// Regcall has a different register budget than the default SysV call.
	Registers available_registers = {
		.int_registers = is_regcall ? 11 : 16,
		.sse_registers = is_regcall ? 16 : 8
	};
	if (signature->failable)
	{
		// A failable function returns the error; the real result (if any) is
		// passed through an extra pointer parameter.
		signature->failable_abi_info = x64_classify_return_type(type_error, &available_registers, is_regcall);
		if (abi_arg_is_indirect(signature->failable_abi_info))
		{
			// The hidden sret pointer consumes an integer register.
			available_registers.int_registers--;
		}
		if (signature->rtype->type->type_kind != TYPE_VOID)
		{
			signature->ret_abi_info = x64_classify_parameter(type_get_ptr(type_lowering(signature->rtype->type)), &available_registers, is_regcall);
		}
	}
	else
	{
		signature->ret_abi_info = x64_classify_return_type(signature->rtype->type, &available_registers, is_regcall);
		if (abi_arg_is_indirect(signature->ret_abi_info))
		{
			// The hidden sret pointer consumes an integer register.
			available_registers.int_registers--;
		}
	}
	// Classify the parameters in order, consuming registers as we go.
	Decl **params = signature->params;
	VECEACH(params, i)
	{
		params[i]->var.abi_info = x64_classify_parameter(params[i]->type, &available_registers, is_regcall);
	}
}

View File

@@ -0,0 +1,689 @@
// Copyright (c) 2020 Christoffer Lerno. All rights reserved.
// Use of this source code is governed by a LGPLv3.0
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_c_abi_internal.h"
#define MIN_ABI_STACK_ALIGN 4
static bool x86_try_use_free_regs(GenContext *context, Type *type);
/**
 * Is this (canonically) a 16-byte vector, i.e. what x86 treats as an
 * SSE/SIMD vector?
 */
static inline bool type_is_simd_vector(Type *type)
{
	Type *canonical = type->canonical;
	if (canonical->type_kind != TYPE_VECTOR) return false;
	return type_size(canonical) == 16;
}
/**
 * Does this struct/union (transitively) contain a 16-byte SIMD vector member?
 */
static bool type_is_union_struct_with_simd_vector(Type *type)
{
	if (!type_is_union_struct(type)) return false;
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		Type *member_type = members[i]->type;
		if (type_is_simd_vector(member_type)) return true;
		// Fix: recurse into the member, not the enclosing type. Recursing on
		// 'type' again caused unbounded recursion (stack overflow) for any
		// struct whose first member was not itself a SIMD vector.
		if (type_is_union_struct_with_simd_vector(member_type)) return true;
	}
	return false;
}
/**
 * Compute the stack alignment for a byval x86 argument.
 * @return 0 to signal "use the default", otherwise the alignment in bytes.
 */
static unsigned x86_stack_alignment(Type *type, unsigned alignment)
{
	// Natural alignment below the ABI minimum => caller uses the default.
	if (alignment < MIN_ABI_STACK_ALIGN) return 0;
	// Outside of Darwin, stack arguments are always 4-byte aligned.
	if (!build_target.x86.is_darwin_vector_abi) return MIN_ABI_STACK_ALIGN;
	// On Darwin, anything containing an SSE vector is 16-byte aligned.
	bool holds_simd = type_is_simd_vector(type) || type_is_union_struct_with_simd_vector(type);
	if (alignment >= 16 && holds_simd) return 16;
	return MIN_ABI_STACK_ALIGN;
}
/**
 * Build an indirect argument for x86: either a plain pointer (possibly in a
 * register) when not byval, or a byval argument with correct stack alignment.
 */
static ABIArgInfo *x86_create_indirect_result(GenContext *context, Type *type, ByVal by_val)
{
	if (by_val != BY_VAL)
	{
		// Pass a pointer; it may ride in a free integer register.
		ABIArgInfo *info = abi_arg_new_indirect_not_by_val();
		if (context->abi.int_registers)
		{
			context->abi.int_registers--;
			// MCU does not use the inreg attribute.
			if (!build_target.x86.is_mcu_api) info->attributes.by_reg = true;
		}
		return info;
	}
	// From here on everything is by val:
	// Compute alignment
	unsigned alignment = type_abi_alignment(type);
	unsigned stack_alignment = x86_stack_alignment(type, alignment);
	// Default alignment
	if (stack_alignment == 0) stack_alignment = 4;
	// Realign if alignment is greater.
	if (alignment > stack_alignment)
	{
		return abi_arg_new_indirect_realigned(stack_alignment);
	}
	return abi_arg_new_indirect_by_val();
}
/**
 * Build an indirect (sret) return for x86, consuming an integer register
 * for the hidden return pointer when one is available.
 */
ABIArgInfo *create_indirect_return_x86(GenContext *context)
{
	ABIArgInfo *arg = abi_arg_new(ABI_ARG_INDIRECT);
	if (context->abi.int_registers)
	{
		// The hidden sret pointer consumes one integer register.
		context->abi.int_registers--;
		// MCU does not use the inreg attribute.
		if (!build_target.x86.is_mcu_api) return abi_arg_by_reg_attr(arg);
	}
	return arg;
}
/**
 * Can this type be returned in registers on x86? Requires size <= 8 bytes
 * (power of two except on MCU) and, for aggregates, that every non-empty
 * member is itself register-returnable.
 */
static bool x86_should_return_type_in_reg(Type *type)
{
	type = type->canonical;
	unsigned size = type_size(type);
	if (size > 8) return false;
	// Require power of two for everything except mcu.
	if (!build_target.x86.is_mcu_api && !is_power_of_two(size)) return false;
	if (type->type_kind == TYPE_VECTOR)
	{
		// 64 (and 128 bit) vectors are not returned as registers
		return size < 8;
	}
	switch (type->type_kind)
	{
		case TYPE_VECTOR:
			// Already handled above; unreachable in this switch.
		case TYPE_POISONED:
		case TYPE_MEMBER:
		case TYPE_VOID:
		case TYPE_FUNC:
		case TYPE_TYPEDEF:
		case TYPE_TYPEINFO:
			UNREACHABLE
		case ALL_INTS:
		case ALL_FLOATS:
		case TYPE_BOOL:
		case TYPE_ENUM:
		case TYPE_POINTER:
		case TYPE_TYPEID:
		case TYPE_VARARRAY:
		case TYPE_ERR_UNION:
		case TYPE_STRING:
		case TYPE_SUBARRAY:
		case TYPE_ERRTYPE:
		case TYPE_COMPLEX:
			return true;
		case TYPE_ARRAY:
			// Small arrays <= 8 bytes.
			return x86_should_return_type_in_reg(type->array.base);
		case TYPE_STRUCT:
		case TYPE_UNION:
			// Handle below
			break;
	}
	// If all can be passed in registers, then pass in register
	// (remember we already limited the size!)
	Decl** members = type->decl->strukt.members;
	VECEACH (members, i)
	{
		Type *member_type = members[i]->type;
		// Zero-sized members do not disqualify the aggregate.
		if (type_is_empty_field(member_type, true)) continue;
		if (!x86_should_return_type_in_reg(member_type)) return false;
	}
	return true;
}
/**
 * Classify an x86 (i386) return value: HVAs for vec/regcall, Darwin vector
 * rules, small aggregates in registers, and an sret fallback otherwise.
 */
ABIArgInfo *x86_classify_return(GenContext *context, Type *type)
{
	// Nothing to return.
	if (type == type_void)
	{
		return abi_arg_new(ABI_ARG_IGNORE);
	}
	type = type_lowering(type);
	Type *base = NULL;
	unsigned elements = 0;
	if (context->abi.call_convention == CALL_CONVENTION_VECTOR || context->abi.call_convention == CALL_CONVENTION_REGCALL)
	{
		// Pass homogenous aggregates in the normal way.
		if (type_is_homogenous_aggregate(type, &base, &elements))
		{
			return abi_arg_new(ABI_ARG_DIRECT_COERCE);
		}
	}
	if (type->type_kind == TYPE_VECTOR)
	{
		// On Darwin, vectors may be returned in registers.
		if (build_target.x86.is_darwin_vector_abi)
		{
			unsigned size = type_size(type);
			if (size == 16)
			{
				// Special case, convert 128 bit vector to two 64 bit elements.
				return abi_arg_new_direct_coerce(abi_type_new_plain(type_get_vector(type_long, 2)));
			}
			// Always return in register if it fits in a general purpose
			// register, or if it is 64 bits and has a single element.
			if (size == 1 || size == 2 || size == 4 || (size == 8 && type->vector.len == 1))
			{
				return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
			}
			return create_indirect_return_x86(context);
		}
		return abi_arg_new(ABI_ARG_DIRECT_COERCE);
	}
	if (type_is_abi_aggregate(type))
	{
		// If small structs may not be returned in registers, everything but
		// complex numbers goes indirect.
		// Fix: the condition previously compared == TYPE_COMPLEX, which
		// inverted the test and sent only complex types indirect.
		if (!build_target.x86.return_small_struct_in_reg_abi && type->type_kind != TYPE_COMPLEX)
		{
			return create_indirect_return_x86(context);
		}
		// Ignore empty struct/unions
		if (type_is_empty_union_struct(type, true))
		{
			return abi_arg_new(ABI_ARG_IGNORE);
		}
		// Check if we can return it in a register.
		if (x86_should_return_type_in_reg(type))
		{
			size_t size = type_size(type);
			// Special case is floats and pointers in single element structs (except for MSVC)
			Type *single_element = type_find_single_struct_element(type);
			if (single_element)
			{
				if ((type_is_float(single_element) && !build_target.x86.is_win32_float_struct_abi))
				{
					return abi_arg_new(ABI_ARG_EXPAND);
				}
				// Fix: test the single element, not the aggregate itself.
				// An aggregate is never a pointer, so this branch was dead.
				if (type_is_pointer(single_element))
				{
					return abi_arg_new(ABI_ARG_EXPAND);
				}
			}
			// This is not a single element struct, so we wrap it in an int.
			return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
		}
		return create_indirect_return_x86(context);
	}
	// Is this small enough to need to be extended?
	if (type_is_promotable_integer(type))
	{
		return abi_arg_new_direct_int_ext(type);
	}
	// If we support something like int128, then this is an indirect return.
	if (type_is_integer(type) && type_size(type) > 8) return create_indirect_return_x86(context);
	// Otherwise we expect to just pass this nicely in the return.
	return abi_arg_new(ABI_ARG_DIRECT_COERCE);
}
/**
 * Should an aggregate be passed directly (in registers)?
 * @param needs_padding out: set when a small aggregate should get inreg
 *        padding under the reg-based conventions; always initialized.
 */
static inline bool x86_should_aggregate_use_direct(GenContext *context, Type *type, bool *needs_padding)
{
	// Initialize the out parameter on every path.
	// Fix: this was previously assigned after the Windows early return, so
	// *needs_padding was left uninitialized for win32 callers.
	*needs_padding = false;
	// On Windows, aggregates other than HFAs are never passed in registers, and
	// they do not consume register slots. Homogenous floating-point aggregates
	// (HFAs) have already been dealt with at this point.
	if (build_target.x86.is_win32_float_struct_abi) return false;
	if (!x86_try_use_free_regs(context, type)) return false;
	if (build_target.x86.is_mcu_api) return true;
	switch (context->abi.call_convention)
	{
		case CALL_CONVENTION_FAST:
		case CALL_CONVENTION_VECTOR:
		case CALL_CONVENTION_REGCALL:
			// Reg-based conventions: small aggregates get inreg padding
			// instead of being passed directly.
			if (type_size(type) <= 4 && context->abi.int_registers)
			{
				*needs_padding = true;
			}
			return false;
		default:
			return true;
	}
}
/**
 * Return true if the type is an MMX type: a 64-bit integer vector whose
 * element is narrower than 64 bits, i.e. <2 x i32>, <4 x i16>, or <8 x i8>.
 */
static inline bool x86_is_mmxtype(Type *type)
{
	if (type->type_kind != TYPE_VECTOR) return false;
	Type *element = type->vector.base;
	if (!type_is_integer(element)) return false;
	if (type_size(element) >= 8) return false;
	return type_size(type) == 8;
}
/**
 * Test whether an argument type which is to be passed indirectly (on the
 * stack) would have the equivalent layout if it was expanded into separate
 * arguments. If so, we prefer to do the latter to avoid inhibiting
 * optimizations.
 */
static inline bool x86_can_expand_indirect_aggregate_arg(Type *type)
{
	assert(type_is_abi_aggregate(type));
	// Error unions can always be expanded since they are two pointers wide.
	if (type->canonical->type_kind == TYPE_ERR_UNION) return true;
	if (type->canonical->type_kind == TYPE_ERRTYPE) return true;
	if (!type_is_union_struct(type)) return false;
	size_t size = 0;
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		Type *member_type = type_lowering(members[i]->type);
		switch (member_type->type_kind)
		{
			case TYPE_I32:
			case TYPE_U32:
			case TYPE_F32:
			case TYPE_U64:
			case TYPE_I64:
			case TYPE_F64:
				// Fix: accumulate the scalar member's size. Previously scalar
				// members were skipped, so the final size comparison failed
				// for any struct containing a scalar and expansion never
				// happened.
				size += type_size(member_type);
				break;
			case TYPE_COMPLEX:
			{
				// Only complex float / complex double expand cleanly.
				size_t complex_type_size = type_size(member_type->complex);
				if (complex_type_size != 4 && complex_type_size != 8) return false;
				size += type_size(member_type);
				break;
			}
			default:
				return false;
		}
	}
	// Expansion is layout-equivalent only if there is no padding.
	return size == type_size(type);
}
/**
 * Try to reserve the integer registers a type would need; updates the free
 * register count as a side effect even on some failure paths (non-MCU).
 * @return true if the type can use (and has consumed) registers.
 */
static bool x86_try_use_free_regs(GenContext *context, Type *type)
{
	// 1. On hard-float ABIs floats travel in FP registers, never int regs;
	//    only under soft float may a float consume integer registers.
	//    (The old comment stated the opposite of what the code does.)
	if (!build_target.x86.use_soft_float && type_is_float(type)) return false;
	unsigned size = type_size(type);
	// 2. If the type is empty, don't use a register.
	if (!size) return false;
	// 3. Calculate the number of registers.
	unsigned size_in_regs = (size + 3) / 4;
	// 4. The MCU psABI allows passing parameters in-reg even if there are
	//    earlier parameters that are passed on the stack. Also,
	//    it does not allow passing >8-byte structs in-register,
	//    even if there are 3 free registers available.
	if (build_target.x86.is_mcu_api)
	{
		// 4a. Just return if there are not enough registers.
		if (size_in_regs > context->abi.int_registers) return false;
		// 4b. If the size in regs > 2 then refuse.
		if (size_in_regs > 2) return false;
		// 4c. Use registers, we're fine.
		context->abi.int_registers -= size_in_regs;
		return true;
	}
	// 5. The non-MCU ABI, if we don't have enough registers,
	//    clear them to prevent register use later on.
	if (size_in_regs > context->abi.int_registers)
	{
		context->abi.int_registers = 0;
		return false;
	}
	// 6. Use registers, we're fine.
	context->abi.int_registers -= size_in_regs;
	return true;
}
/**
 * Check if a primitive should be in reg, if so, remove number of free registers.
 * @return true if it should have an inreg attribute, false otherwise.
 */
static bool x86_try_put_primitive_in_reg(GenContext *context, Type *type)
{
	// 1. Try to use regs for this type,
	//    regardless whether we succeed or not, this will update
	//    the number of registers available.
	if (!x86_try_use_free_regs(context, type)) return false;
	// 2. On MCU, do not use the inreg attribute.
	if (build_target.x86.is_mcu_api) return false;
	// 3. Reg/fast/vec calls limit it to 32 bits
	//    and integer / pointer types.
	//    for all other calls we're good to go.
	//    Some questions here though if we use 3 registers on these
	//    we don't mark it as inreg, however a later register may use a reg.
	//    to get an inreg attribute. Investigate!
	switch (context->abi.call_convention)
	{
		case CALL_CONVENTION_FAST:
		case CALL_CONVENTION_VECTOR:
		case CALL_CONVENTION_REGCALL:
			if (type_size(type) > 4) return false;
			return type_is_integer_kind(type) || type_is_pointer(type);
		default:
			return true;
	}
}
/**
 * Handle the vector/regcall conventions for homogenous aggregates (HVAs).
 * Only float/double/vector base types reach this point, see
 * type_is_homogenous_base_type().
 */
static inline ABIArgInfo *x86_classify_homogenous_aggregate(GenContext *context, Type *type, unsigned elements, bool is_vec_call)
{
	// Without enough SSE registers the aggregate goes by pointer.
	if (context->abi.sse_registers < elements)
	{
		return x86_create_indirect_result(context, type, BY_VAL_SKIP);
	}

	// Claim the SSE registers.
	context->abi.sse_registers -= elements;

	// A vector call passes the HVA directly without flattening it.
	if (is_vec_call)
	{
		ABIArgInfo *arg_info = abi_arg_new(ABI_ARG_DIRECT_COERCE);
		arg_info->attributes.by_reg = true;
		return arg_info;
	}

	// Builtins and vectors need no expansion; everything else expands.
	bool pass_directly = type_is_builtin(type->type_kind) || type->type_kind == TYPE_VECTOR;
	return pass_directly ? abi_arg_new_direct() : abi_arg_new(ABI_ARG_EXPAND);
}
/**
 * Classify a vector argument for the x86 (32-bit) ABI.
 * @return the ABIArgInfo describing how the vector is passed.
 */
static inline ABIArgInfo *x86_classify_vector(GenContext *context, Type *type)
{
	unsigned size = type_size(type);
	// On Windows, vectors are passed directly if registers are available, or
	// indirectly if not. This avoids the need to align argument memory. Pass
	// user-defined vector types larger than 512 bits indirectly for simplicity.
	if (build_target.x86.is_win32_float_struct_abi)
	{
		// Fix: a 512-bit (64-byte) vector must still go in a register, so the
		// comparison is <= 64, not < 64 (matches Clang's "Size <= 512" bit check).
		if (size <= 64 && context->abi.sse_registers)
		{
			context->abi.sse_registers--;
			return abi_arg_by_reg_attr(abi_arg_new_direct());
		}
		return x86_create_indirect_result(context, type, BY_VAL_SKIP);
	}
	// On Darwin, some vectors are passed in memory, we handle this by passing
	// it as an i8/i16/i32/i64.
	if (build_target.x86.is_darwin_vector_abi)
	{
		if ((size == 1 || size == 2 || size == 4) || (size == 8 && type->vector.len == 1))
		{
			return abi_arg_new_direct_coerce(abi_type_new_int_bits(size * 8));
		}
	}
	// MMX types are passed as a single i64.
	if (x86_is_mmxtype(type))
	{
		return abi_arg_new_direct_coerce(abi_type_new_int_bits(64));
	}
	// Send as a normal parameter.
	return abi_arg_new_direct();
}
/**
 * Classify aggregate arguments:
 * error type, struct, union, subarray,
 * string, array, error union, complex.
 */
static inline ABIArgInfo *x86_classify_aggregate(GenContext *context, Type *type)
{
	// Only called for aggregates.
	assert(type_is_abi_aggregate(type));

	// Ignore empty unions / structs on non-win.
	if (!build_target.x86.is_win32_float_struct_abi && type_is_empty_union_struct(type, true))
	{
		return abi_arg_new(ABI_ARG_IGNORE);
	}

	unsigned size = type_size(type);
	bool needs_padding_in_reg = false;

	// Pass over-aligned aggregates on Windows indirectly. This behavior was
	// added in MSVC 2015.
	if (build_target.x86.is_win32_float_struct_abi && type_abi_alignment(type) > 4)
	{
		return x86_create_indirect_result(context, type, BY_VAL_SKIP);
	}

	// See if we can pass aggregates directly in registers;
	// this never happens for MSVC.
	if (x86_should_aggregate_use_direct(context, type, &needs_padding_in_reg))
	{
		// Here we coerce the aggregate into a struct { i32, i32, ... }
		// but we do not generate this struct immediately here.
		unsigned size_in_regs = (size + 3) / 4;
		ABIArgInfo *info = abi_arg_new_direct_coerce(abi_type_new_int_bits(32));
		info->direct_coerce.elements = size_in_regs;
		// Not in reg on MCU.
		if (!build_target.x86.is_mcu_api) info->attributes.by_reg = true;
		return info;
	}

	// Expand small (<= 128-bit) record types when we know that the stack layout
	// of those arguments will match the struct. This is important because the
	// LLVM backend isn't smart enough to remove byval, which inhibits many
	// optimizations.
	// Don't do this for the MCU if there are still free integer registers
	// (see X86_64 ABI for full explanation).
	if (size <= 16 && (!build_target.x86.is_mcu_api || !context->abi.int_registers) &&
	    x86_can_expand_indirect_aggregate_arg(type))
	{
		if (!needs_padding_in_reg) return abi_arg_new(ABI_ARG_EXPAND);
		// This is padded expansion: under reg/vec/fast conventions the
		// padding itself is passed in a register.
		ABIArgInfo *info = abi_arg_new_expand_padded(type_int);
		bool is_reg_call = context->abi.call_convention == CALL_CONVENTION_REGCALL;
		bool is_vec_call = context->abi.call_convention == CALL_CONVENTION_VECTOR;
		bool is_fast_call = context->abi.call_convention == CALL_CONVENTION_FAST;
		info->expand.padding_by_reg = is_fast_call || is_reg_call || is_vec_call;
		return info;
	}

	// Fall back to passing the aggregate indirectly by value.
	return x86_create_indirect_result(context, type, BY_VAL);
}
/**
 * Classify primitive arguments: pointer / vararray / int / float / bool.
 * @param context the ABI classification state.
 * @param type the (lowered) primitive type.
 * @return the ABIArgInfo describing how the value is passed.
 */
static ABIArgInfo *x86_classify_primitives(GenContext *context, Type *type)
{
	// Anything wider than 8 bytes (f128/i128/u128) goes on the stack.
	if (type_size(type) > 8) return x86_create_indirect_result(context, type, BY_VAL_SKIP);

	// Reserve a register if possible (updates the register budget either way).
	bool in_reg = x86_try_put_primitive_in_reg(context, type);

	// Small integers are sign/zero-extended; everything else passes directly.
	ABIArgInfo *arg_info = type_is_promotable_integer(type)
			? abi_arg_new_direct_int_ext(type)
			: abi_arg_new_direct();
	arg_info->attributes.by_reg = in_reg;
	return arg_info;
}
/**
 * Classify an argument to an x86 function, dispatching on the lowered
 * type kind to the primitive / vector / aggregate classifiers.
 */
static ABIArgInfo *x86_classify_argument(GenContext *context, Type *type)
{
	// FIXME: Set alignment on indirect arguments.
	// We lower all types here first to avoid enums and typedefs.
	type = type_lowering(type);
	bool is_reg_call = context->abi.call_convention == CALL_CONVENTION_REGCALL;
	bool is_vec_call = context->abi.call_convention == CALL_CONVENTION_VECTOR;
	Type *base = NULL;
	unsigned elements = 0;
	// For vec and reg calls, homogenous aggregates take a special path.
	if ((is_vec_call || is_reg_call)
	    && type_is_homogenous_aggregate(type, &base, &elements))
	{
		return x86_classify_homogenous_aggregate(context, type, elements, is_vec_call);
	}
	switch (type->type_kind)
	{
		case TYPE_POISONED:
		case TYPE_TYPEDEF:
		case TYPE_VOID:
		case TYPE_ENUM:
		case TYPE_FUNC:
		case TYPE_TYPEID:
			// These kinds are removed by type_lowering above.
			UNREACHABLE
		case ALL_FLOATS:
		case ALL_INTS:
		case TYPE_BOOL:
		case TYPE_VARARRAY:
		case TYPE_POINTER:
			return x86_classify_primitives(context, type);
		case TYPE_VECTOR:
			return x86_classify_vector(context, type);
		case TYPE_ERRTYPE:
		case TYPE_STRUCT:
		case TYPE_UNION:
		case TYPE_SUBARRAY:
		case TYPE_STRING:
		case TYPE_ARRAY:
		case TYPE_ERR_UNION:
		case TYPE_COMPLEX:
			return x86_classify_aggregate(context, type);
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
			// Compile-time-only types never reach codegen.
			UNREACHABLE
	}
	UNREACHABLE
}
/**
 * Build ABI classification info for an x86 (32-bit) function signature:
 * sets up the per-convention register budgets, classifies the return value
 * (and the failable error slot) and every parameter.
 */
void c_abi_func_create_x86(GenContext *context, FunctionSignature *signature)
{
	context->abi.call_convention = signature->convention;
	context->abi.sse_registers = 0;
	// Register budgets depend on the calling convention.
	switch (signature->convention)
	{
		case CALL_CONVENTION_NORMAL:
		case CALL_CONVENTION_SYSCALL:
			if (build_target.x86.is_win32_float_struct_abi)
			{
				context->abi.sse_registers = 3;
			}
			context->abi.int_registers = build_target.default_number_regs;
			break;
		case CALL_CONVENTION_REGCALL:
			context->abi.int_registers = 5;
			context->abi.sse_registers = 8;
			break;
		case CALL_CONVENTION_VECTOR:
			context->abi.int_registers = 2;
			context->abi.sse_registers = 6;
			break;
		case CALL_CONVENTION_FAST:
			context->abi.int_registers = 2;
			break;
		default:
			UNREACHABLE
	}
	// MCU overrides any convention: no SSE, fixed 3 integer registers.
	if (build_target.x86.is_mcu_api)
	{
		context->abi.sse_registers = 0;
		context->abi.int_registers = 3;
	}
	if (signature->failable)
	{
		// Failable functions return the error value; the "real" return
		// value (if any) becomes a pointer argument.
		signature->failable_abi_info = x86_classify_return(context, type_error);
		if (signature->rtype->type->type_kind != TYPE_VOID)
		{
			signature->ret_abi_info = x86_classify_argument(context, type_get_ptr(type_lowering(signature->rtype->type)));
		}
	}
	else
	{
		signature->ret_abi_info = x86_classify_return(context, signature->rtype->type);
	}
	/*
	 * // The chain argument effectively gives us another free register.
	if (FI.isChainCall())
	++State.FreeRegs;
	// For vectorcall, do a first pass over the arguments, assigning FP and vector
	// arguments to XMM registers as available.
	if (State.CC == llvm::CallingConv::X86_VectorCall)
	runVectorCallFirstPass(FI, State);
	*/
	if (context->abi.call_convention == CALL_CONVENTION_VECTOR)
	{
		// Vectorcall requires a first pass over the arguments (see the
		// quoted Clang code above); not implemented yet.
		FATAL_ERROR("X86 vector call not supported");
	}
	else
	{
		Decl **params = signature->params;
		VECEACH(params, i)
		{
			params[i]->var.abi_info = x86_classify_argument(context, params[i]->type);
		}
	}
}

View File

@@ -3,69 +3,202 @@
// a copy of which can be found in the LICENSE file.
#include "llvm_codegen_internal.h"
#include "bigint.h"
static inline LLVMMetadataRef gencontext_create_debug_type_from_decl(GenContext *context, Decl *decl)
#define LINE_ZERO 0
static unsigned id_counter = 0;
static inline LLVMMetadataRef llvm_get_debug_type_internal(GenContext *c, Type *type, LLVMMetadataRef scope);
static inline LLVMMetadataRef llvm_get_debug_member(GenContext *c, Type *type, const char *name, unsigned offset, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags);
static inline LLVMMetadataRef llvm_get_debug_struct(GenContext *c, Type *type, const char *external_name, LLVMMetadataRef *elements, unsigned element_count, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags);
static LLVMMetadataRef llvm_debug_forward_comp(GenContext *c, Type *type, const char *external_name, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags);
static inline LLVMMetadataRef llvm_get_debug_struct(GenContext *c, Type *type, const char *external_name, LLVMMetadataRef *elements, unsigned element_count, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags)
{
static LLVMMetadataRef debug_params[512];
switch (decl->decl_kind)
size_t external_name_len = strlen(external_name);
LLVMMetadataRef real = LLVMDIBuilderCreateStructType(c->debug.builder,
scope,
external_name_len ? type->name : "", external_name_len ? strlen(type->name) : 0,
loc ? c->debug.file : NULL,
loc ? loc->line : 0,
type_size(type) * 8,
type_abi_alignment(type) * 8,
flags, NULL,
elements, element_count,
c->debug.runtime_version,
NULL, // VTable
external_name, strlen(external_name));
if (type->backend_debug_type)
{
case DECL_POISONED:
case DECL_VAR:
case DECL_ENUM_CONSTANT:
case NON_TYPE_DECLS:
UNREACHABLE;
case DECL_FUNC:
{
VECEACH(decl->func.function_signature.params, i)
{
Type *param_type = decl->func.function_signature.params[i]->type;
debug_params[i + 1] = gencontext_get_debug_type(context, param_type);
}
unsigned param_size = vec_size(decl->func.function_signature.params);
debug_params[0] = decl->func.function_signature.rtype->type->backend_debug_type;
return LLVMDIBuilderCreateSubroutineType(context->debug.builder,
context->debug.file,
debug_params, param_size + 1,
/** TODO **/ 0);
}
case DECL_TYPEDEF:
TODO
case DECL_STRUCT:
TODO
case DECL_UNION:
TODO
case DECL_ENUM:
TODO
case DECL_ERR:
TODO
LLVMMetadataReplaceAllUsesWith(type->backend_debug_type, real);
}
UNREACHABLE
return real;
}
void gencontext_push_debug_scope(GenContext *context, LLVMMetadataRef debug_scope)
/**
 * Create a DI member entry for a composite type.
 * 'offset' is given in bytes; LLVM expects sizes and offsets in bits,
 * hence the * 8. A NULL 'loc' emits the member without file/line info.
 */
static inline LLVMMetadataRef llvm_get_debug_member(GenContext *c, Type *type, const char *name, unsigned offset, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags)
{
	LLVMMetadataRef file = loc ? c->debug.file : NULL;
	unsigned line = loc ? loc->line : 0;
	LLVMMetadataRef member_type = llvm_get_debug_type_internal(c, type, scope);
	return LLVMDIBuilderCreateMemberType(c->debug.builder, scope,
	                                     name, strlen(name),
	                                     file, line,
	                                     type_size(type) * 8,
	                                     type_abi_alignment(type) * 8,
	                                     offset * 8, flags, member_type);
}
// Push a lexical debug scope onto the scope stack; it becomes the
// current scope returned by llvm_debug_current_scope.
void llvm_debug_scope_push(GenContext *context, LLVMMetadataRef debug_scope)
{
	VECADD(context->debug.lexical_block_stack, debug_scope);
}
void gencontext_pop_debug_scope(GenContext *context)
// Pop the innermost lexical debug scope pushed by llvm_debug_scope_push.
void llvm_debug_scope_pop(GenContext *context)
{
	vec_pop(context->debug.lexical_block_stack);
}
void gencontext_emit_debug_location(GenContext *context, SourceSpan location)
LLVMMetadataRef llvm_debug_current_scope(GenContext *context)
{
SourceLocation *source_loc = TOKLOC(location.loc);
LLVMMetadataRef scope;
if (vec_size(context->debug.lexical_block_stack) > 0)
{
scope = VECLAST(context->debug.lexical_block_stack);
return VECLAST(context->debug.lexical_block_stack);
}
else
return context->debug.compile_unit;
}
// Emit debug info for a global variable and stash the resulting
// DIGlobalVariableExpression on the declaration.
void llvm_emit_debug_global_var(GenContext *c, Decl *global)
{
	SourceLocation *loc = TOKLOC(global->span.loc);
	// NOTE(review): name length comes from TOKLEN(name_token) while the name
	// pointer is global->name - confirm they always agree.
	// NOTE(review): the last argument is AlignInBits; global->alignment looks
	// like a byte alignment - verify units.
	global->var.backend_debug_ref = LLVMDIBuilderCreateGlobalVariableExpression(
			c->debug.builder,
			c->debug.file,                              // scope
			global->name,
			TOKLEN(global->name_token),
			global->external_name,                      // linkage name
			strlen(global->external_name),
			c->debug.file,
			loc->line,
			llvm_get_debug_type(c, global->type),
			global->visibility == VISIBLE_LOCAL,        // local to unit
			LLVMDIBuilderCreateExpression(c->debug.builder, NULL, 0),
			NULL,                                       // no separate declaration
			global->alignment);
}
void llvm_emit_debug_function(GenContext *c, Decl *decl)
{
LLVMDIFlags flags = LLVMDIFlagZero;
if (!decl->func.body) return;
switch (decl->visibility)
{
scope = context->debug.compile_unit;
case VISIBLE_LOCAL:
flags |= LLVMDIFlagPrivate;
break;
case VISIBLE_MODULE:
flags |= LLVMDIFlagProtected;
break;
case VISIBLE_PUBLIC:
case VISIBLE_EXTERN:
flags |= LLVMDIFlagPublic;
break;
default:
UNREACHABLE
}
flags |= LLVMDIFlagPrototyped;
if (decl->func.attr_noreturn) flags |= LLVMDIFlagNoReturn;
SourceLocation *loc = TOKILOC(decl->span.loc);
c->debug.function = LLVMDIBuilderCreateFunction(c->debug.builder,
c->debug.file,
decl->name, TOKILEN(decl->name_token),
decl->external_name, strlen(decl->external_name),
c->debug.file,
loc->line,
llvm_get_debug_type(c, decl->type),
decl->visibility == VISIBLE_LOCAL,
true,
loc->line,
flags,
build_options.optimization_level != OPTIMIZATION_NONE);
LLVMSetSubprogram(decl->backend_ref, c->debug.function);
}
// Emit debug info for a local variable and attach a dbg.declare
// to the current insertion block.
void llvm_emit_debug_local_var(GenContext *c, Decl *decl)
{
	EMIT_LOC(c, decl);
	SourceLocation *location = TOKLOC(decl->span.loc);
	// NOTE(review): "optimization != NONE" is passed as AlwaysPreserve and
	// decl->alignment as AlignInBits (likely bytes) - confirm both.
	LLVMMetadataRef var = LLVMDIBuilderCreateAutoVariable(
			c->debug.builder,
			c->debug.function,
			decl->name,
			TOKLEN(decl->name_token),
			c->debug.file,
			location->line,
			llvm_get_debug_type(c, decl->type),
			build_options.optimization_level != OPTIMIZATION_NONE,
			LLVMDIFlagZero,
			decl->alignment);
	decl->var.backend_debug_ref = var;
	LLVMMetadataRef inline_at = NULL;  // not inlined
	LLVMDIBuilderInsertDeclareAtEnd(c->debug.builder,
	                                decl->backend_ref, var,
	                                LLVMDIBuilderCreateExpression(c->debug.builder, NULL, 0),
	                                LLVMDIBuilderCreateDebugLocation(c->context, location->line, location->col,
	                                                                 c->debug.function, inline_at),
	                                LLVMGetInsertBlock(c->builder));
}
/**
 * Emit debug info for a function parameter and attach a dbg.declare
 * to the current insertion block.
 * @param c the codegen context.
 * @param parameter the parameter declaration (unnamed parameters get "anon").
 * @param index the parameter position.
 *        NOTE(review): LLVM's CreateParameterVariable expects a 1-based
 *        ArgNo - confirm the caller passes it that way.
 */
void llvm_emit_debug_parameter(GenContext *c, Decl *parameter, unsigned index)
{
	SourceLocation *loc = TOKLOC(parameter->span.loc);
	const char *name = parameter->name ? parameter->name : "anon";
	bool always_preserve = false;
	parameter->var.backend_debug_ref = LLVMDIBuilderCreateParameterVariable(
			c->debug.builder,
			c->debug.function,
			name,
			strlen(name),
			index,
			c->debug.file,
			loc->line,
			llvm_get_debug_type(c, parameter->type),
			always_preserve,
			LLVMDIFlagZero);
	LLVMMetadataRef inline_at = NULL;  // not inlined
	LLVMDIBuilderInsertDeclareAtEnd(c->debug.builder,
	                                parameter->backend_ref,
	                                parameter->var.backend_debug_ref,
	                                LLVMDIBuilderCreateExpression(c->debug.builder, NULL, 0),
	                                LLVMDIBuilderCreateDebugLocation(c->context, loc->line, loc->col, c->debug.function,
	                                                                 inline_at),
	                                LLVMGetInsertBlock(c->builder));
}
void llvm_emit_debug_location(GenContext *context, SourceSpan location)
{
static TokenId last = { 0 };
// Avoid re-emitting the same location.
if (last.index == location.loc.index) return;
if (!context->builder) return;
SourceLocation *source_loc = TOKLOC(location.loc);
LLVMMetadataRef scope = llvm_debug_current_scope(context);
LLVMMetadataRef loc = LLVMDIBuilderCreateDebugLocation(context->context,
source_loc->line,
@@ -75,9 +208,22 @@ void gencontext_emit_debug_location(GenContext *context, SourceSpan location)
LLVMSetCurrentDebugLocation2(context->builder, loc);
}
void gencontext_debug_push_lexical_scope(GenContext *context, SourceSpan location)
/**
 * Create a replaceable (forward-declared) composite debug type, used to break
 * cycles in recursive type graphs. Callers build the real type later and
 * patch uses via LLVMMetadataReplaceAllUsesWith.
 */
static LLVMMetadataRef llvm_debug_forward_comp(GenContext *c, Type *type, const char *external_name, SourceLocation *loc, LLVMMetadataRef scope, LLVMDIFlags flags)
{
	// NOTE(review): the second argument of CreateReplaceableCompositeType is the
	// DWARF *tag*; passing a running id counter looks wrong (a DW_TAG_* constant
	// is expected) - verify against the LLVM-C API.
	// NOTE(review): build_options.version is passed as the RuntimeLang slot - confirm.
	return LLVMDIBuilderCreateReplaceableCompositeType(c->debug.builder, id_counter++,
	                                                   type->name, strlen(type->name),
	                                                   scope,
	                                                   c->debug.file, loc ? loc->line : 0,
	                                                   build_options.version,
	                                                   type_size(type) * 8,
	                                                   type_abi_alignment(type) * 8,
	                                                   flags,
	                                                   external_name,
	                                                   strlen(external_name));
}
void llvm_debug_push_lexical_scope(GenContext *context, SourceSpan location)
{
SourceLocation *source_loc = TOKLOC(location.loc);
LLVMMetadataRef scope;
@@ -95,11 +241,11 @@ void gencontext_debug_push_lexical_scope(GenContext *context, SourceSpan locatio
source_loc->line,
source_loc->col);
gencontext_push_debug_scope(context, block);
llvm_debug_scope_push(context, block);
}
static LLVMMetadataRef gencontext_simple_debug_type(GenContext *context, Type *type, int dwarf_code)
static LLVMMetadataRef llvm_debug_simple_type(GenContext *context, Type *type, int dwarf_code)
{
return type->backend_debug_type = LLVMDIBuilderCreateBasicType(context->debug.builder,
type->name,
@@ -109,23 +255,227 @@ static LLVMMetadataRef gencontext_simple_debug_type(GenContext *context, Type *t
}
static LLVMMetadataRef gencontext_func_debug_type(GenContext *context, Type *type)
// Build the debug type for a pointer. If the pointee has not been generated
// yet, a forward composite stands in for this pointer so recursive types
// (e.g. struct Node { Node *next; }) terminate; it is RAUW'd afterwards.
static LLVMMetadataRef llvm_debug_pointer_type(GenContext *c, Type *type)
{
	if (!type->pointer->backend_debug_type)
	{
		type->backend_debug_type = llvm_debug_forward_comp(c, type, type->name, NULL, NULL, LLVMDIFlagZero);
	}
	LLVMMetadataRef real = LLVMDIBuilderCreatePointerType(c->debug.builder,
	                                                      llvm_get_debug_type(c, type->pointer),
	                                                      type_size(type) * 8,
	                                                      type_abi_alignment(type) * 8, 0,
	                                                      type->name, strlen(type->name));
	// Replace the forward declaration if one was created above.
	if (type->backend_debug_type)
	{
		LLVMMetadataReplaceAllUsesWith(type->backend_debug_type, real);
	}
	return real;
}
// Build the debug type for an enum: a forward declaration first (in case the
// underlying type recurses back here), then the real enumeration type.
static LLVMMetadataRef llvm_debug_enum_type(GenContext *c, Type *type, LLVMMetadataRef scope)
{
	Decl *decl = type->decl;
	SourceLocation *location = TOKLOC(decl->span.loc);
	LLVMMetadataRef forward = llvm_debug_forward_comp(c, type, decl->external_name, location, scope, LLVMDIFlagZero);
	type->backend_debug_type = forward;
	Type *enum_real_type = decl->enums.type_info->type->canonical;
	LLVMMetadataRef *elements = NULL;
	Decl **enums = decl->enums.values;
	bool is_unsigned = type_is_unsigned(enum_real_type);
	VECEACH(enums, i)
	{
		Decl *enum_constant = enums[i];
		// Constant values are stored as bigints; extract per signedness.
		uint64_t val = is_unsigned
				? bigint_as_unsigned(&enum_constant->enum_constant.expr->const_expr.i)
				: (uint64_t)bigint_as_signed(&enum_constant->enum_constant.expr->const_expr.i);
		LLVMMetadataRef debug_info = LLVMDIBuilderCreateEnumerator(
				c->debug.builder,
				enum_constant->name, TOKLEN(enum_constant->name_token),
				val,
				is_unsigned);
		vec_add(elements, debug_info);
	}
	LLVMMetadataRef real = LLVMDIBuilderCreateEnumerationType(c->debug.builder,
	                                                          scope,
	                                                          type->decl->name, TOKLEN(type->decl->name_token),
	                                                          c->debug.file, location->line, type_size(type) * 8,
	                                                          type_abi_alignment(type) * 8,
	                                                          elements, vec_size(elements),
	                                                          llvm_get_debug_type(c, enum_real_type));
	// Patch every use of the forward declaration with the real type.
	LLVMMetadataReplaceAllUsesWith(forward, real);
	return real;
}
// Build the debug type for a struct, union or error type. A forward
// declaration is installed first so member types may refer back recursively.
static LLVMMetadataRef llvm_debug_structlike_type(GenContext *c, Type *type, LLVMMetadataRef scope)
{
	Decl *decl = type->decl;
	LLVMDIFlags flags = 0;
	SourceLocation *location = TOKLOC(decl->span.loc);
	// Create a forward reference in case of recursive data.
	LLVMMetadataRef forward = llvm_debug_forward_comp(c, type, decl->external_name, location, scope, flags);
	type->backend_debug_type = forward;
	LLVMMetadataRef *elements = NULL;
	Decl **members = decl->strukt.members;
	VECEACH(members, i)
	{
		Decl *member = members[i];
		SourceLocation *member_loc = TOKLOC(member->span.loc);
		// Anonymous members get an empty name ("?:" is a GNU extension).
		LLVMMetadataRef debug_info = llvm_get_debug_member(c,
		                                                   member->type,
		                                                   member->name ?: "",
		                                                   member->offset,
		                                                   member_loc,
		                                                   forward,
		                                                   LLVMDIFlagZero);
		vec_add(elements, debug_info);
	}
	LLVMMetadataRef real;
	if (type->type_kind == TYPE_UNION)
	{
		real = LLVMDIBuilderCreateUnionType(c->debug.builder,
		                                    scope,
		                                    type->decl->name ?: "", type->decl->name ? TOKLEN(type->decl->name_token) : 0,
		                                    c->debug.file, location->line, type_size(type) * 8,
		                                    type_abi_alignment(type) * 8,
		                                    LLVMDIFlagZero,
		                                    elements, vec_size(members),
		                                    c->debug.runtime_version,
		                                    type->decl->name ? decl->external_name : "",
		                                    type->decl->name ? strlen(decl->external_name) : 0);
		LLVMMetadataReplaceAllUsesWith(forward, real);
		return real;
	}
	// Struct / errtype path: llvm_get_debug_struct performs the RAUW itself.
	return llvm_get_debug_struct(c, type, decl->name ? decl->external_name : "", elements, vec_size(elements), location, scope, LLVMDIFlagZero);
}
/**
 * Build the debug type for a subarray (fat pointer): { ptr, len }.
 * A forward declaration is installed first so the member types may refer
 * back to this type.
 */
static LLVMMetadataRef llvm_debug_subarray_type(GenContext *c, Type *type)
{
	LLVMMetadataRef forward = llvm_debug_forward_comp(c, type, type->name, NULL, NULL, LLVMDIFlagZero);
	type->backend_debug_type = forward;
	Type *ptr_type = type_get_ptr(type->array.base);
	LLVMMetadataRef elements[2] = {
		llvm_get_debug_member(c, ptr_type, "ptr", 0, NULL, forward, LLVMDIFlagZero),
		// Fix: "len" follows the pointer in the layout; it previously
		// used byte offset 0, overlapping "ptr" in the debug info.
		llvm_get_debug_member(c, type_usize, "len", type_size(ptr_type), NULL, forward, LLVMDIFlagZero)
	};
	return llvm_get_debug_struct(c, type, type->name, elements, 2, NULL, NULL, LLVMDIFlagZero);
}
/**
 * Build the debug type for an error union: { domain, err }.
 * A forward declaration is installed first so the member types may refer
 * back to this type.
 */
static LLVMMetadataRef llvm_debug_errunion_type(GenContext *c, Type *type)
{
	LLVMMetadataRef forward = llvm_debug_forward_comp(c, type, type->name, NULL, NULL, LLVMDIFlagZero);
	type->backend_debug_type = forward;
	LLVMMetadataRef elements[2] = {
		llvm_get_debug_member(c, type_usize, "domain", 0, NULL, forward, LLVMDIFlagZero),
		// Fix: "err" follows "domain" in the layout; it previously used
		// byte offset 0, overlapping "domain" in the debug info.
		llvm_get_debug_member(c, type_usize, "err", type_size(type_usize), NULL, forward, LLVMDIFlagZero)
	};
	return llvm_get_debug_struct(c, type, type->name, elements, 2, NULL, NULL, LLVMDIFlagZero);
}
/**
 * Build the debug type for a (possibly nested) fixed array. Nested arrays
 * are flattened into a single array type with one subrange per dimension.
 */
static LLVMMetadataRef llvm_debug_array_type(GenContext *c, Type *type)
{
	LLVMMetadataRef *ranges = NULL;
	Type *current_type = type;
	// Collect one subrange per array dimension and find the element type.
	while (current_type->canonical->type_kind == TYPE_ARRAY)
	{
		VECADD(ranges, LLVMDIBuilderGetOrCreateSubrange(c->debug.builder, 0, current_type->canonical->array.len));
		current_type = current_type->canonical->array.base;
	}
	// If the element type is not generated yet, use a forward declaration
	// to break potential recursion.
	if (!current_type->backend_debug_type)
	{
		type->backend_debug_type = llvm_debug_forward_comp(c, type, type->name, NULL, NULL, LLVMDIFlagZero);
	}
	// Fix: LLVMDIBuilderCreateArrayType takes size and alignment in *bits*;
	// the byte values were passed before (every sibling call multiplies by 8).
	LLVMMetadataRef real = LLVMDIBuilderCreateArrayType(
			c->debug.builder,
			type_size(type) * 8,
			type_abi_alignment(current_type) * 8,
			llvm_get_debug_type(c, current_type),
			ranges, vec_size(ranges));
	if (type->backend_debug_type)
	{
		LLVMMetadataReplaceAllUsesWith(type->backend_debug_type, real);
	}
	return real;
}
// Build the debug type for a typedef. Compiler-defined typedefs (no decl)
// are emitted without location; user typedefs may need a forward
// declaration if the aliased type has not been generated yet.
static LLVMMetadataRef llvm_debug_typedef_type(GenContext *c, Type *type)
{
	Decl *decl = type->decl;
	// Is this a primitive typedef? If so, we create it without reference.
	if (!decl)
	{
		return LLVMDIBuilderCreateTypedef(c->debug.builder,
		                                  llvm_get_debug_type(c, type->canonical),
		                                  type->name, strlen(type->name),
		                                  NULL, 0, NULL);
	}
	SourceLocation *location = TOKLOC(decl->span.loc);
	// Use forward references in case we haven't resolved the original type.
	if (!type->canonical->backend_debug_type)
	{
		type->backend_debug_type = llvm_debug_forward_comp(c, type, type->name, location, NULL, LLVMDIFlagZero);
	}
	// NOTE(review): the last argument of LLVMDIBuilderCreateTypedef is the
	// *scope*; c->debug.file is passed here - confirm that is intended.
	LLVMMetadataRef real = LLVMDIBuilderCreateTypedef(c->debug.builder,
	                                                  llvm_get_debug_type(c, decl->typedef_decl.type_info->type),
	                                                  decl->name, TOKLEN(decl->name_token),
	                                                  c->debug.file, location->line,
	                                                  c->debug.file);
	if (type->backend_debug_type)
	{
		LLVMMetadataReplaceAllUsesWith(type->backend_debug_type, real);
	}
	return real;
}
/**
 * Build the debug type for a (possibly nested) vector type, one subrange
 * per nesting level.
 */
static LLVMMetadataRef llvm_debug_vector_type(GenContext *c, Type *type)
{
	LLVMMetadataRef *ranges = NULL;
	Type *current_type = type;
	// Leftover development trace - recursive vector element types are
	// not yet handled with a forward declaration.
	printf("TODO Handle Recursive Vector type\n");
	while (current_type->canonical->type_kind == TYPE_VECTOR)
	{
		VECADD(ranges, LLVMDIBuilderGetOrCreateSubrange(c->debug.builder, 0, current_type->canonical->vector.len));
		current_type = current_type->canonical->vector.base;
	}
	// Fix: LLVMDIBuilderCreateVectorType takes size and alignment in *bits*;
	// the byte values were passed before (every sibling call multiplies by 8).
	return LLVMDIBuilderCreateVectorType(
			c->debug.builder,
			type_size(type) * 8,
			type_abi_alignment(current_type) * 8,
			llvm_get_debug_type(c, current_type),
			ranges, vec_size(ranges));
}
static LLVMMetadataRef llvm_debug_func_type(GenContext *c, Type *type)
{
FunctionSignature *sig = type->func.signature;
static LLVMMetadataRef *buffer = NULL;
vec_resize(buffer, 0);
vec_add(buffer, llvm_debug_type(sig->rtype->type));
vec_add(buffer, llvm_get_debug_type(c, sig->rtype->type));
VECEACH(sig->params, i)
{
vec_add(buffer, llvm_debug_type(sig->params[i]->type));
vec_add(buffer, llvm_get_debug_type(c, sig->params[i]->type));
}
return LLVMDIBuilderCreateSubroutineType(context->debug.builder,
context->debug.file,
return LLVMDIBuilderCreateSubroutineType(c->debug.builder,
c->debug.file,
buffer,
vec_size(buffer), 0);
}
LLVMMetadataRef gencontext_get_debug_type(GenContext *context, Type *type)
static inline LLVMMetadataRef llvm_get_debug_type_internal(GenContext *c, Type *type, LLVMMetadataRef scope)
{
if (type->backend_debug_type) return type->backend_debug_type;
// Consider special handling of UTF8 arrays.
@@ -139,63 +489,60 @@ LLVMMetadataRef gencontext_get_debug_type(GenContext *context, Type *type)
case TYPE_MEMBER:
UNREACHABLE
case TYPE_BOOL:
return gencontext_simple_debug_type(context, type, DW_ATE_boolean);
return llvm_debug_simple_type(c, type, DW_ATE_boolean);
case TYPE_I8:
return gencontext_simple_debug_type(context, type, DW_ATE_signed_char); // DW_ATE_UTF?
return llvm_debug_simple_type(c, type, DW_ATE_signed_char); // DW_ATE_UTF?
case TYPE_U8:
return gencontext_simple_debug_type(context, type, DW_ATE_unsigned_char);
return llvm_debug_simple_type(c, type, DW_ATE_unsigned_char);
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
return gencontext_simple_debug_type(context, type, DW_ATE_signed);
case TYPE_I128:
return llvm_debug_simple_type(c, type, DW_ATE_signed);
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
return gencontext_simple_debug_type(context, type, DW_ATE_unsigned);
case TYPE_U128:
return llvm_debug_simple_type(c, type, DW_ATE_unsigned);
case TYPE_F16:
case TYPE_F32:
case TYPE_F64:
return gencontext_simple_debug_type(context, type, DW_ATE_float);
case TYPE_F128:
return llvm_debug_simple_type(c, type, DW_ATE_float);
case TYPE_COMPLEX:
return llvm_debug_simple_type(c, type, DW_ATE_complex_float);
case TYPE_VECTOR:
return type->backend_debug_type = llvm_debug_vector_type(c, type);
case TYPE_VOID:
return NULL;
case TYPE_POINTER:
return type->backend_debug_type = LLVMDIBuilderCreatePointerType(context->debug.builder, type->pointer->backend_debug_type, type_size(type->canonical->pointer), 0, 0, type->name, strlen(type->name));
return type->backend_debug_type = llvm_debug_pointer_type(c, type);
case TYPE_ENUM:
TODO
case TYPE_ERRTYPE:
TODO
return type->backend_debug_type = llvm_debug_enum_type(c, type, scope);
case TYPE_FUNC:
return type->backend_debug_type = gencontext_func_debug_type(context, type);
return type->backend_debug_type = llvm_debug_func_type(c, type);
case TYPE_ERRTYPE:
case TYPE_STRUCT:
// LLVMDIBuilderCreateStructType(context->debug.builder, NULL, type->decl->name, strlen(type->decl->name), type->decl->module->)
TODO
case TYPE_UNION:
TODO
return type->backend_debug_type = llvm_debug_structlike_type(c, type, scope);
case TYPE_TYPEDEF:
TODO
return type->backend_debug_type = llvm_debug_typedef_type(c, type);
case TYPE_STRING:
TODO
case TYPE_ARRAY:
{
LLVMMetadataRef *ranges = NULL;
Type *current_type = type;
while (current_type->canonical->type_kind == TYPE_ARRAY)
{
VECADD(ranges, LLVMDIBuilderGetOrCreateSubrange(context->debug.builder, 0, current_type->canonical->array.len));
current_type = current_type->canonical->array.base;
}
return type->backend_debug_type = LLVMDIBuilderCreateArrayType(
context->debug.builder,
type->array.len,
type_abi_alignment(type->array.base),
llvm_debug_type(type->array.base),
ranges, vec_size(ranges));
}
return type->backend_debug_type = llvm_debug_array_type(c, type);
case TYPE_VARARRAY:
TODO
case TYPE_SUBARRAY:
TODO
return type->backend_debug_type = llvm_debug_subarray_type(c, type);
case TYPE_ERR_UNION:
TODO
return type->backend_debug_type = llvm_debug_errunion_type(c, type);
}
UNREACHABLE
}
// Public entry point for debug type generation.
LLVMMetadataRef llvm_get_debug_type(GenContext *c, Type *type)
{
	// All types should be generated in the outer (file) scope.
	return llvm_get_debug_type_internal(c, type, c->debug.file);
}

File diff suppressed because it is too large Load Diff

View File

@@ -6,7 +6,12 @@
#include "llvm_codegen_internal.h"
#include "bigint.h"
bool gencontext_check_block_branch_emit(GenContext *context)
static void llvm_emit_param_attributes(GenContext *context, LLVMValueRef function, ABIArgInfo *info, bool is_return, int index, int last_index);
static inline void llvm_emit_return_value(GenContext *context, LLVMValueRef value);
static void llvm_expand_from_args(GenContext *c, Type *type, LLVMValueRef ref, unsigned *index);
static inline void llvm_process_parameter_value(GenContext *c, Decl *decl, unsigned *index);
bool llvm_emit_check_block_branch(GenContext *context)
{
if (!context->current_block) return false;
// If it's not used, we can delete the previous block and skip the branch.
@@ -43,24 +48,27 @@ bool gencontext_check_block_branch_emit(GenContext *context)
return true;
};
void gencontext_emit_br(GenContext *context, LLVMBasicBlockRef next_block)
void llvm_emit_br(GenContext *c, LLVMBasicBlockRef next_block)
{
if (!gencontext_check_block_branch_emit(context)) return;
context->current_block = NULL;
LLVMBuildBr(context->builder, next_block);
if (!llvm_emit_check_block_branch(c)) return;
c->current_block = NULL;
LLVMBuildBr(c->builder, next_block);
}
void gencontext_emit_cond_br(GenContext *context, LLVMValueRef value, LLVMBasicBlockRef thenBlock, LLVMBasicBlockRef elseBlock)
void llvm_emit_cond_br(GenContext *context, BEValue *value, LLVMBasicBlockRef then_block, LLVMBasicBlockRef else_block)
{
assert(context->current_block);
LLVMBuildCondBr(context->builder, value, thenBlock, elseBlock);
assert(value->kind == BE_BOOLEAN);
LLVMBuildCondBr(context->builder, value->value, then_block, else_block);
LLVMClearInsertionPosition(context->builder);
context->current_block = NULL;
context->current_block_is_target = false;
}
void gencontext_emit_block(GenContext *context, LLVMBasicBlockRef next_block)
void llvm_emit_block(GenContext *context, LLVMBasicBlockRef next_block)
{
assert(context->current_block == NULL);
LLVMAppendExistingBasicBlock(context->function, next_block);
@@ -69,61 +77,233 @@ void gencontext_emit_block(GenContext *context, LLVMBasicBlockRef next_block)
context->current_block_is_target = false;
}
/**
 * Recursively store expanded scalar arguments back into an aggregate in
 * memory: arrays and structs recurse element by element, unions store into
 * their largest member, scalars consume the next IR parameter.
 * @param index in/out position of the next IR-level parameter.
 */
static void llvm_expand_from_args(GenContext *c, Type *type, LLVMValueRef ref, unsigned *index)
{
	switch (type->type_kind)
	{
		case TYPE_ARRAY:
			for (unsigned i = 0; i < type->array.len; i++)
			{
				// GEP to element i, then recurse with the element type.
				LLVMValueRef indices[2] = { llvm_get_zero(c, type_uint), llvm_const_int(c, type_uint, i) };
				LLVMValueRef target = LLVMBuildInBoundsGEP2(c->builder, llvm_get_type(c, type), ref, indices, 2, "");
				LLVMValueRef cast_addr = llvm_emit_bitcast(c, target, type_get_ptr(type->array.base));
				llvm_expand_from_args(c, type->array.base, cast_addr, index);
			}
			return;
		case TYPE_STRUCT:
		{
			Decl **members = type->decl->strukt.members;
			VECEACH(members, i)
			{
				// GEP to member i, then recurse with the member type.
				LLVMValueRef indices[2] = { llvm_get_zero(c, type_uint), llvm_const_int(c, type_uint, i) };
				LLVMValueRef target = LLVMBuildInBoundsGEP2(c->builder, llvm_get_type(c, type), ref, indices, 2, "");
				LLVMValueRef cast_addr = llvm_emit_bitcast(c, target, type_get_ptr(members[i]->type));
				llvm_expand_from_args(c, members[i]->type, cast_addr, index);
			}
			return;
		}
		case TYPE_UNION:
		{
			// A union is expanded as its largest member only.
			Type *largest_type = type_find_largest_union_element(type);
			LLVMValueRef cast_addr = llvm_emit_bitcast(c, ref, type_get_ptr(largest_type));
			llvm_expand_from_args(c, largest_type, cast_addr, index);
			return;
		}
		default:
			// Scalar leaf: consume the next IR parameter and store it.
			LLVMBuildStore(c->builder, llvm_get_next_param(c, index), ref);
			return;
	}
}
static inline void gencontext_emit_parameter(GenContext *context, Decl *decl, unsigned index)
LLVMValueRef llvm_get_next_param(GenContext *context, unsigned *index)
{
return LLVMGetParam(context->function, (*index)++);
}
static inline void llvm_process_parameter_value(GenContext *c, Decl *decl, unsigned *index)
{
ABIArgInfo *info = decl->var.abi_info;
switch (info->kind)
{
case ABI_ARG_IGNORE:
return;
case ABI_ARG_INDIRECT:
{
// A simple memcopy, with alignment respected.
LLVMValueRef pointer = llvm_get_next_param(c, index);
llvm_emit_memcpy_to_decl(c, decl, pointer, info->indirect.realignment);
return;
}
case ABI_ARG_DIRECT_PAIR:
{
// Here we do the following transform:
// lo, hi -> { lo, hi } -> struct
LLVMTypeRef lo = llvm_abi_type(c, info->direct_pair.lo);
LLVMTypeRef hi = llvm_abi_type(c, info->direct_pair.hi);
LLVMTypeRef struct_type = gencontext_get_twostruct(c, lo, hi);
unsigned decl_alignment = decl_abi_alignment(decl);
// Cast to { lo, hi }
LLVMValueRef cast = LLVMBuildBitCast(c->builder, decl->backend_ref, LLVMPointerType(struct_type, 0), "pair");
// Point to the lo value.
LLVMValueRef lo_ptr = LLVMBuildStructGEP2(c->builder, struct_type, cast, 0, "lo");
// Store it in the struct.
unsigned lo_alignment = MIN(llvm_abi_alignment(lo), decl_alignment);
llvm_store_aligned(c, lo_ptr, llvm_get_next_param(c, index), lo_alignment);
// Point to the hi value.
LLVMValueRef hi_ptr = LLVMBuildStructGEP2(c->builder, struct_type, cast, 1, "hi");
// Store it in the struct.
unsigned hi_alignment = MIN(llvm_abi_alignment(hi), decl_alignment);
llvm_store_aligned(c, hi_ptr, llvm_get_next_param(c, index), decl_alignment);
return;
}
case ABI_ARG_DIRECT_COERCE:
{
LLVMTypeRef coerce_type = llvm_get_coerce_type(c, info);
if (!coerce_type || coerce_type == llvm_get_type(c, decl->type))
{
llvm_store_aligned_decl(c, decl, llvm_get_next_param(c, index));
return;
}
// Cast to the coerce type.
LLVMValueRef cast = LLVMBuildBitCast(c->builder, decl->backend_ref, LLVMPointerType(coerce_type, 0), "coerce");
unsigned decl_alignment = decl_abi_alignment(decl);
// If we're not flattening, we simply do a store.
if (!abi_info_should_flatten(info))
{
LLVMValueRef param = llvm_get_next_param(c, index);
// Store it with the alignment of the decl.
llvm_store_aligned_decl(c, decl, param);
return;
}
// In this case we've been flattening the parameter into multiple registers.
LLVMTypeRef element_type = llvm_abi_type(c, info->direct_coerce.type);
// Store each expanded parameter.
for (unsigned idx = 0; idx < info->direct_coerce.elements; idx++)
{
LLVMValueRef element_ptr = LLVMBuildStructGEP2(c->builder, coerce_type, cast, idx, "");
LLVMValueRef value = llvm_get_next_param(c, index);
llvm_store_aligned(c, element_ptr, value, MIN(llvm_abi_alignment(element_type), decl_alignment));
}
return;
}
case ABI_ARG_EXPAND:
{
llvm_expand_from_args(c, decl->type, decl->backend_ref, index);
if (info->expand.padding_type)
{
// Skip the pad.
llvm_get_next_param(c, index);
}
}
}
}
static inline void llvm_emit_parameter(GenContext *context, Decl *decl, unsigned *index, unsigned real_index)
{
assert(decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_PARAM);
// Allocate room on stack and copy.
const char *name = decl->name ? decl->name : "anon";
decl->backend_ref = gencontext_emit_alloca(context, llvm_type(decl->type), name);
if (gencontext_use_debug(context))
// Allocate room on stack, but do not copy.
decl->backend_ref = llvm_emit_decl_alloca(context, decl);
llvm_process_parameter_value(context, decl, index);
if (llvm_use_debug(context))
{
SourceLocation *loc = TOKLOC(decl->span.loc);
LLVMMetadataRef var = LLVMDIBuilderCreateParameterVariable(
context->debug.builder,
context->debug.function,
name,
strlen(name),
index + 1,
context->debug.file,
loc->line,
gencontext_get_debug_type(context, decl->type),
true, 0 /* flags */
);
decl->var.backend_debug_ref = var;
LLVMDIBuilderInsertDeclareAtEnd(context->debug.builder,
decl->backend_ref, var, LLVMDIBuilderCreateExpression(context->debug.builder, NULL, 0),
LLVMDIBuilderCreateDebugLocation(context->context, loc->line, loc->col, context->debug.function, /* inline at */NULL),
LLVMGetInsertBlock(context->builder));
llvm_emit_debug_parameter(context, decl, real_index);
}
gencontext_emit_store(context, decl, LLVMGetParam(context->function, index));
}
void gencontext_emit_implicit_return(GenContext *context)
static inline void llvm_emit_return_value(GenContext *context, LLVMValueRef value)
{
if (context->cur_func_decl->func.function_signature.failable)
if (!value)
{
LLVMBuildRet(context->builder, gencontext_emit_no_error_union(context));
LLVMBuildRetVoid(context->builder);
}
else
{
if (context->cur_func_decl->func.function_signature.rtype->type != type_void && !context->cur_func_decl->func.function_signature.return_param)
LLVMBuildRet(context->builder, value);
}
context->current_block = NULL;
context->current_block_is_target = false;
}
void llvm_emit_return_abi(GenContext *c, LLVMValueRef return_value, LLVMValueRef failable)
{
FunctionSignature *signature = &c->cur_func_decl->func.function_signature;
ABIArgInfo *info = signature->ret_abi_info;
// If we have a failable it's always the return argument, so we need to copy
// the return value into the return value holder.
LLVMValueRef return_out = c->return_out;
Type *return_type = signature->rtype->type;
// In this case we use the failable as the actual return.
if (signature->failable)
{
if (return_value)
{
LLVMBuildUnreachable(context->builder);
LLVMBuildStore(c->builder, return_value, c->return_out);
}
return_out = c->failable_out;
return_type = type_error;
return_value = failable;
info = signature->failable_abi_info;
}
switch (info->kind)
{
case ABI_ARG_INDIRECT:
LLVMBuildStore(c->builder, return_value, return_out);
if (info->indirect.realignment) TODO
llvm_emit_return_value(c, NULL);
return;
case ABI_ARG_IGNORE:
llvm_emit_return_value(c, NULL);
return;
case ABI_ARG_EXPAND:
// Expands to multiple slots -
// Not applicable to return values.
UNREACHABLE
case ABI_ARG_DIRECT_PAIR:
case ABI_ARG_DIRECT_COERCE:
{
LLVMTypeRef coerce_type = llvm_get_coerce_type(c, info);
if (!coerce_type || coerce_type == llvm_get_type(c, return_type))
{
// The normal return
llvm_emit_return_value(c, return_value);
return;
}
assert(!abi_info_should_flatten(info));
llvm_emit_return_value(c,
gencontext_emit_convert_value_to_coerced(c,
coerce_type,
return_value,
return_type));
return;
}
LLVMBuildRetVoid(context->builder);
}
}
void gencontext_emit_function_body(GenContext *context, Decl *decl)
void llvm_emit_return_implicit(GenContext *c)
{
if (c->cur_func_decl->func.function_signature.rtype->type != type_void)
{
LLVMBuildUnreachable(c->builder);
return;
}
LLVMValueRef failable = c->cur_func_decl->func.function_signature.failable ?
LLVMConstNull(llvm_get_type(c, type_error)) : NULL;
llvm_emit_return_abi(c, NULL, failable);
}
void llvm_emit_function_body(GenContext *context, Decl *decl)
{
DEBUG_LOG("Generating function %s.", decl->external_name);
assert(decl->backend_ref);
bool emit_debug = gencontext_use_debug(context);
bool emit_debug = llvm_use_debug(context);
LLVMValueRef prev_function = context->function;
LLVMBuilderRef prev_builder = context->builder;
@@ -149,9 +329,22 @@ void gencontext_emit_function_body(GenContext *context, Decl *decl)
context->alloca_point = alloca_point;
FunctionSignature *signature = &decl->func.function_signature;
int arg = 0;
unsigned arg = 0;
if (signature->return_param)
if (emit_debug)
{
llvm_debug_scope_push(context, context->debug.function);
}
if (signature->failable && signature->failable_abi_info->kind == ABI_ARG_INDIRECT)
{
context->failable_out = LLVMGetParam(context->function, arg++);
}
else
{
context->failable_out = NULL;
}
if (signature->ret_abi_info && signature->ret_abi_info->kind == ABI_ARG_INDIRECT)
{
context->return_out = LLVMGetParam(context->function, arg++);
}
@@ -160,23 +353,18 @@ void gencontext_emit_function_body(GenContext *context, Decl *decl)
context->return_out = NULL;
}
if (emit_debug)
{
gencontext_push_debug_scope(context, context->debug.function);
}
// Generate LLVMValueRef's for all parameters, so we can use them as local vars in code
VECEACH(decl->func.function_signature.params, i)
{
gencontext_emit_parameter(context, decl->func.function_signature.params[i], arg++);
llvm_emit_parameter(context, decl->func.function_signature.params[i], &arg, i);
}
LLVMSetCurrentDebugLocation2(context->builder, NULL);
VECEACH(decl->func.body->compound_stmt.stmts, i)
{
gencontext_emit_stmt(context, decl->func.body->compound_stmt.stmts[i]);
llvm_emit_stmt(context, decl->func.body->compound_stmt.stmts[i]);
}
if (context->current_block && !LLVMGetFirstInstruction(context->current_block) && !LLVMGetFirstUse(LLVMBasicBlockAsValue(context->current_block)))
@@ -190,8 +378,8 @@ void gencontext_emit_function_body(GenContext *context, Decl *decl)
if (context->current_block && !LLVMGetBasicBlockTerminator(context->current_block))
{
assert(!decl->func.body->compound_stmt.defer_list.end);
gencontext_emit_defer(context, decl->func.body->compound_stmt.defer_list.start, 0);
gencontext_emit_implicit_return(context);
llvm_emit_defer(context, decl->func.body->compound_stmt.defer_list.start, 0);
llvm_emit_return_implicit(context);
}
// erase alloca point
@@ -203,40 +391,116 @@ void gencontext_emit_function_body(GenContext *context, Decl *decl)
LLVMDisposeBuilder(context->builder);
if (gencontext_use_debug(context))
if (llvm_use_debug(context))
{
gencontext_pop_debug_scope(context);
llvm_debug_scope_pop(context);
}
context->builder = prev_builder;
context->function = prev_function;
}
void gencontext_emit_function_decl(GenContext *context, Decl *decl)
static void llvm_emit_param_attributes(GenContext *context, LLVMValueRef function, ABIArgInfo *info, bool is_return, int index, int last_index)
{
assert(last_index == index || info->kind == ABI_ARG_DIRECT_PAIR || info->kind == ABI_ARG_IGNORE
|| info->kind == ABI_ARG_EXPAND);
if (info->attributes.zeroext)
{
// Direct only
assert(index == last_index);
llvm_attribute_add(context, function, attribute_zext, index);
}
if (info->attributes.signext)
{
// Direct only
assert(index == last_index);
llvm_attribute_add(context, function, attribute_sext, index);
}
if (info->attributes.by_reg)
{
llvm_attribute_add_range(context, function, attribute_inreg, index, last_index);
}
switch (info->kind)
{
case ABI_ARG_EXPAND:
case ABI_ARG_IGNORE:
case ABI_ARG_DIRECT_COERCE:
case ABI_ARG_DIRECT_PAIR:
break;
case ABI_ARG_INDIRECT:
if (info->indirect.realignment)
{
llvm_attribute_add_int(context, function, attribute_align, info->indirect.realignment, index);
}
if (is_return)
{
llvm_attribute_add(context, function, attribute_sret, index);
}
else
{
// TODO then type attributes are added to LLVM-C, use that for byval.
if (info->indirect.by_val) llvm_attribute_add(context, function, attribute_byval, index);
llvm_attribute_add(context, function, attribute_noalias, index);
}
break;
}
}
void llvm_emit_function_decl(GenContext *c, Decl *decl)
{
assert(decl->decl_kind == DECL_FUNC);
// Resolve function backend type for function.
LLVMValueRef function = LLVMAddFunction(context->module, decl->cname ?: decl->external_name, llvm_type(decl->type));
LLVMValueRef function = LLVMAddFunction(c->module, decl->cname ?: decl->external_name, llvm_get_type(c, decl->type));
decl->backend_ref = function;
if (decl->func.function_signature.return_param)
FunctionSignature *signature = &decl->func.function_signature;
FunctionSignature *type_signature = decl->type->func.signature;
// We only resolve 1 function signature, so we might have functions
// with the same signature (but different default values!)
// that we have in common. So overwrite the data from the type here.
if (signature != type_signature)
{
if (!decl->func.function_signature.failable)
// Store the params.
Decl **params = signature->params;
// Copy the rest.
*signature = *type_signature;
signature->params = params;
VECEACH(params, i)
{
gencontext_add_attribute(context, function, sret_attribute, 1);
Decl *sig_param = type_signature->params[i];
Decl *param = params[i];
param->var.abi_info = sig_param->var.abi_info;
}
gencontext_add_attribute(context, function, noalias_attribute, 1);
signature->params = params;
}
ABIArgInfo *ret_abi_info = signature->failable_abi_info ?: signature->ret_abi_info;
llvm_emit_param_attributes(c, function, ret_abi_info, true, 0, 0);
Decl **params = signature->params;
if (signature->failable_abi_info && signature->ret_abi_info)
{
ABIArgInfo *info = signature->ret_abi_info;
llvm_emit_param_attributes(c, function, info, false, info->param_index_start + 1, info->param_index_end);
}
VECEACH(params, i)
{
Decl *param = params[i];
ABIArgInfo *info = param->var.abi_info;
llvm_emit_param_attributes(c, function, info, false, info->param_index_start + 1, info->param_index_end);
}
if (decl->func.attr_inline)
{
gencontext_add_attribute(context, function, alwaysinline_attribute, -1);
llvm_attribute_add(c, function, attribute_alwaysinline, -1);
}
if (decl->func.attr_noinline)
{
gencontext_add_attribute(context, function, noinline_attribute, -1);
llvm_attribute_add(c, function, attribute_noinline, -1);
}
if (decl->func.attr_noreturn)
{
gencontext_add_attribute(context, function, noreturn_attribute, -1);
llvm_attribute_add(c, function, attribute_noreturn, -1);
}
if (decl->alignment)
{
@@ -246,7 +510,7 @@ void gencontext_emit_function_decl(GenContext *context, Decl *decl)
{
LLVMSetSection(function, decl->section);
}
gencontext_add_attribute(context, function, nounwind_attribute, -1);
llvm_attribute_add(c, function, attribute_nounwind, -1);
if (decl->func.attr_stdcall && (build_target.os == OS_TYPE_WIN32))
{
@@ -270,66 +534,36 @@ void gencontext_emit_function_decl(GenContext *context, Decl *decl)
LLVMSetVisibility(function, LLVMDefaultVisibility);
break;;
}
if (context->debug.builder)
if (llvm_use_debug(c))
{
LLVMDIFlags flags = LLVMDIFlagZero;
if (!decl->func.body) flags |= LLVMDIFlagPrototyped;
switch (decl->visibility)
{
case VISIBLE_LOCAL:
case VISIBLE_EXTERN:
flags |= LLVMDIFlagPrivate;
break;
case VISIBLE_MODULE:
flags |= LLVMDIFlagProtected;
break;
case VISIBLE_PUBLIC:
flags |= LLVMDIFlagPublic;
break;
}
flags |= LLVMDIFlagPrototyped;
SourceLocation *loc = TOKILOC(decl->span.loc);
context->debug.function = LLVMDIBuilderCreateFunction(context->debug.builder,
context->debug.file,
decl->name, TOKILEN(decl->name_token),
decl->external_name, strlen(decl->external_name),
context->debug.file,
loc->line,
llvm_debug_type(decl->type),
decl->visibility == VISIBLE_LOCAL,
true,
loc->line,
flags,
build_options.optimization_level != OPTIMIZATION_NONE);
LLVMSetSubprogram(function, context->debug.function);
llvm_emit_debug_function(c, decl);
}
}
void gencontext_emit_extern_decl(GenContext *context, Decl *decl)
void llvm_emit_extern_decl(GenContext *context, Decl *decl)
{
switch (decl->decl_kind)
{
case DECL_POISONED:
case DECL_TYPEDEF:
UNREACHABLE;
case DECL_FUNC:
decl->backend_ref = LLVMAddFunction(context->module, decl->cname ?: decl->external_name,
llvm_type(decl->type));
llvm_get_type(context, decl->type));
LLVMSetVisibility(decl->backend_ref, LLVMDefaultVisibility);
break;
case DECL_VAR:
decl->backend_ref = LLVMAddGlobal(context->module, llvm_type(decl->type), decl->cname ?: decl->external_name);
decl->backend_ref = LLVMAddGlobal(context->module, llvm_get_type(context, decl->type), decl->cname ?: decl->external_name);
LLVMSetVisibility(decl->backend_ref, LLVMDefaultVisibility);
break;
case DECL_TYPEDEF:
UNREACHABLE
case DECL_ENUM_CONSTANT:
TODO
case DECL_STRUCT:
case DECL_UNION:
case DECL_ERR:
llvm_type(decl->type);
llvm_get_type(context, decl->type);
TODO // Fix typeid
break;
case DECL_ENUM:
@@ -338,4 +572,3 @@ void gencontext_emit_extern_decl(GenContext *context, Decl *decl)
UNREACHABLE
}
}

View File

@@ -21,6 +21,23 @@
#include <llvm-c/Comdat.h>
#include "dwarf.h"
typedef enum
{
BE_VALUE,
BE_ADDRESS,
BE_ADDRESS_FAILABLE,
BE_BOOLEAN,
} BackendValueKind;
typedef struct
{
BackendValueKind kind : 5;
unsigned alignment : 16;
Type *type;
LLVMValueRef value;
LLVMValueRef failable;
} BEValue;
typedef struct
{
LLVMBasicBlockRef continue_block;
@@ -28,14 +45,78 @@ typedef struct
LLVMBasicBlockRef next_block;
} BreakContinue;
typedef struct
typedef enum
{
Decl *decl;
LLVMBasicBlockRef catch_block;
} Catch;
ABI_ARG_IGNORE,
ABI_ARG_DIRECT_PAIR,
ABI_ARG_DIRECT_COERCE,
ABI_ARG_INDIRECT,
ABI_ARG_EXPAND,
} ABIKind;
typedef enum
{
ABI_TYPE_PLAIN,
ABI_TYPE_INT_BITS
} AbiTypeKind;
typedef struct
{
AbiTypeKind kind : 2;
union
{
Type *type;
unsigned int_bits;
};
} AbiType;
typedef struct ABIArgInfo_
{
unsigned param_index_start : 16;
unsigned param_index_end : 16;
ABIKind kind : 6;
struct
{
bool by_reg : 1;
bool zeroext : 1;
bool signext : 1;
} attributes;
union
{
struct
{
bool padding_by_reg : 1;
Type *padding_type;
} expand;
struct
{
AbiType *lo;
AbiType *hi;
} direct_pair;
struct
{
AbiType *partial_type;
};
struct
{
AbiType *type;
unsigned elements : 3;
bool prevent_flatten : 1;
} direct_coerce;
struct
{
// We may request a certain alignment of the parameters.
unsigned realignment : 16;
bool by_val : 1;
} indirect;
};
} ABIArgInfo;
typedef struct
{
unsigned runtime_version : 8;
LLVMDIBuilderRef builder;
LLVMMetadataRef file;
LLVMMetadataRef compile_unit;
@@ -55,8 +136,10 @@ typedef struct
LLVMBuilderRef builder;
LLVMBasicBlockRef current_block;
LLVMBasicBlockRef catch_block;
// The recipient of the error value in a catch(err = ...) expression.
LLVMValueRef error_var;
LLVMTypeRef bool_type;
LLVMTypeRef byte_type;
Decl *cur_code_decl;
Decl *cur_func_decl;
TypeInfo *current_return_type;
@@ -70,87 +153,172 @@ typedef struct
DebugContext debug;
Context *ast_context;
LLVMValueRef return_out;
LLVMValueRef failable_out;
LLVMBasicBlockRef error_exit_block;
LLVMBasicBlockRef expr_block_exit;
bool current_block_is_target : 1;
bool did_call_stack_save : 1;
LLVMTypeRef type_data_definitions[TYPE_KINDS];
struct
{
unsigned int_registers;
unsigned sse_registers;
unsigned simd_registers;
int args;
CallConvention call_convention;
} abi;
} GenContext;
extern unsigned sadd_overflow_intrinsic_id;
extern unsigned uadd_overflow_intrinsic_id;
extern unsigned ssub_overflow_intrinsic_id;
extern unsigned usub_overflow_intrinsic_id;
extern unsigned smul_overflow_intrinsic_id;
extern unsigned umul_overflow_intrinsic_id;
extern unsigned trap_intrinsic_id;
extern unsigned assume_intrinsic_id;
// LLVM Intrinsics
extern unsigned intrinsic_id_sadd_overflow;
extern unsigned intrinsic_id_uadd_overflow;
extern unsigned intrinsic_id_ssub_overflow;
extern unsigned intrinsic_id_usub_overflow;
extern unsigned intrinsic_id_smul_overflow;
extern unsigned intrinsic_id_umul_overflow;
extern unsigned intrinsic_id_trap;
extern unsigned intrinsic_id_assume;
// No function inlining
extern unsigned noinline_attribute;
// Force inlining
extern unsigned alwaysinline_attribute;
// "Inline possibly"
extern unsigned inlinehint_attribute;
// No function return
extern unsigned noreturn_attribute;
// No exceptions
extern unsigned nounwind_attribute;
// Argument (no writes through the pointer) or function (no writes)
extern unsigned writeonly_attribute;
// Argument (no reads through the pointer) or function (no reads)
extern unsigned readonly_attribute;
// Disable optimization.
extern unsigned optnone_attribute;
// Sret (pointer)
extern unsigned sret_attribute;
// Noalias (pointer)
extern unsigned noalias_attribute;
// LLVM Attributes
extern unsigned attribute_noinline; // No function inlining
extern unsigned attribute_alwaysinline; // Force inlining
extern unsigned attribute_inlinehint; // "Inline possibly"
extern unsigned attribute_noreturn; // No function return
extern unsigned attribute_nounwind; // No exceptions
extern unsigned attribute_writeonly; // No writes on pointer
extern unsigned attribute_readonly; // No reads on pointer
extern unsigned attribute_optnone; // Disable optimization.
extern unsigned attribute_sret; // struct return pointer
extern unsigned attribute_align; // align
extern unsigned attribute_noalias; // noalias (pointer)
extern unsigned attribute_zext; // zero extend
extern unsigned attribute_sext; // sign extend
extern unsigned attribute_byval; // ByVal (param)
extern unsigned attribute_inreg; // inreg (param)
void gencontext_begin_module(GenContext *context);
void gencontext_end_module(GenContext *context);
// BE value
void llvm_value_addr(GenContext *c, BEValue *value);
static inline bool llvm_value_is_addr(BEValue *value) { return value->kind == BE_ADDRESS || value->kind == BE_ADDRESS_FAILABLE; }
static inline bool llvm_value_is_bool(BEValue *value) { return value->kind == BE_BOOLEAN; }
bool llvm_value_is_const(BEValue *value);
void llvm_value_rvalue(GenContext *context, BEValue *value);
void llvm_value_set_bool(BEValue *value, LLVMValueRef llvm_value);
void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type);
void llvm_value_set_address_align(BEValue *value, LLVMValueRef llvm_value, Type *type, unsigned alignment);
void llvm_value_set_address(BEValue *value, LLVMValueRef llvm_value, Type *type);
void
gencontext_add_attribute(GenContext *context, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index);
void gencontext_emit_stmt(GenContext *context, Ast *ast);
LLVMValueRef bevalue_store_value(GenContext *c, BEValue *value);
void gencontext_generate_catch_block_if_needed(GenContext *context, Ast *ast);
LLVMValueRef
gencontext_emit_call_intrinsic(GenContext *context, unsigned intrinsic_id, LLVMTypeRef *types, unsigned type_count,
LLVMValueRef *values, unsigned arg_count);
void gencontext_emit_panic_on_true(GenContext *context, LLVMValueRef value, const char *panic_name);
void gencontext_emit_defer(GenContext *context, AstId defer_start, AstId defer_end);
LLVMTypeRef llvm_abi_type(GenContext *c, AbiType *type);
unsigned llvm_abi_size(LLVMTypeRef type);
unsigned llvm_abi_alignment(LLVMTypeRef type);
void llvm_attribute_add_range(GenContext *c, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index_start, int index_end);
void llvm_attribute_add(GenContext *c, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, int index);
void llvm_attribute_add_string(GenContext *c, LLVMValueRef value_to_add_attribute_to, const char *attribute, const char *value, int index);
void llvm_attribute_add_int(GenContext *c, LLVMValueRef value_to_add_attribute_to, unsigned attribute_id, uint64_t val, int index);
LLVMBasicBlockRef llvm_basic_block_new(GenContext *c, const char *name);
static inline LLVMValueRef llvm_const_int(GenContext *c, Type *type, uint64_t val);
LLVMValueRef gencontext_emit_expr(GenContext *context, Expr *expr);
LLVMValueRef gencontext_emit_assign_expr(GenContext *context, LLVMValueRef ref, Expr *expr, LLVMValueRef failable);
LLVMMetadataRef gencontext_get_debug_type(GenContext *context, Type *type);
void gencontext_emit_debug_location(GenContext *context, SourceSpan location);
void gencontext_debug_push_lexical_scope(GenContext *context, SourceSpan location);
void gencontext_push_debug_scope(GenContext *context, LLVMMetadataRef debug_scope);
void gencontext_pop_debug_scope(GenContext *context);
LLVMValueRef llvm_emit_alloca(GenContext *context, LLVMTypeRef type, unsigned alignment, const char *name);
LLVMValueRef llvm_emit_alloca_aligned(GenContext *c, Type *type, const char *name);
LLVMValueRef llvm_emit_assign_expr(GenContext *context, LLVMValueRef ref, Expr *expr, LLVMValueRef failable);
static inline LLVMValueRef llvm_emit_bitcast(GenContext *context, LLVMValueRef value, Type *type);
void llvm_emit_block(GenContext *c, LLVMBasicBlockRef next_block);
void llvm_emit_br(GenContext *c, LLVMBasicBlockRef next_block);
void llvm_emit_compound_stmt(GenContext *context, Ast *ast);
LLVMValueRef llvm_emit_convert_value_from_coerced(GenContext *context, LLVMTypeRef coerced, LLVMValueRef value, Type *original_type);
LLVMValueRef llvm_emit_decl_alloca(GenContext *c, Decl *decl);
void llvm_emit_function_body(GenContext *context, Decl *decl);
void llvm_emit_function_decl(GenContext *c, Decl *decl);
LLVMValueRef llvm_emit_call_intrinsic(GenContext *c, unsigned intrinsic_id, LLVMTypeRef *types, unsigned type_count, LLVMValueRef *values, unsigned arg_count);
void llvm_emit_cast(GenContext *c, CastKind cast_kind, BEValue *value, Type *to_type, Type *from_type);
void llvm_emit_cond_br(GenContext *context, BEValue *value, LLVMBasicBlockRef then_block, LLVMBasicBlockRef else_block);
void llvm_emit_debug_function(GenContext *c, Decl *decl);
void llvm_emit_debug_location(GenContext *context, SourceSpan location);
void llvm_emit_debug_parameter(GenContext *c, Decl *parameter, unsigned index);
void llvm_emit_debug_local_var(GenContext *c, Decl *var);
void llvm_emit_debug_global_var(GenContext *c, Decl *global);
void llvm_emit_defer(GenContext *c, AstId defer_start, AstId defer_end);
void llvm_emit_extern_decl(GenContext *context, Decl *decl);
LLVMValueRef llvm_emit_is_no_error(GenContext *c, LLVMValueRef error);
LLVMValueRef llvm_emit_load_aligned(GenContext *c, LLVMTypeRef type, LLVMValueRef pointer, unsigned alignment, const char *name);
void llvm_emit_expr(GenContext *c, BEValue *value, Expr *expr);
LLVMValueRef llvm_emit_memclear_size_align(GenContext *c, LLVMValueRef ref, uint64_t size, unsigned align, bool bitcast);
LLVMValueRef llvm_emit_memclear(GenContext *c, LLVMValueRef ref, Type *type);
void llvm_emit_memcpy_to_decl(GenContext *c, Decl *decl, LLVMValueRef source, unsigned source_alignment);
void llvm_emit_stmt(GenContext *c, Ast *ast);
static inline LLVMValueRef llvm_emit_store(GenContext *context, Decl *decl, LLVMValueRef value);
void llvm_emit_panic_on_true(GenContext *c, LLVMValueRef value, const char *panic_name);
void llvm_emit_return_abi(GenContext *c, LLVMValueRef return_value, LLVMValueRef failable);
void llvm_emit_return_implicit(GenContext *c);
LLVMMetadataRef gencontext_create_builtin_debug_type(GenContext *context, Type *builtin_type);
LLVMValueRef gencontext_emit_alloca(GenContext *context, LLVMTypeRef type, const char *name);
void gencontext_emit_compound_stmt(GenContext *context, Ast *ast);
void gencontext_emit_block(GenContext *context, LLVMBasicBlockRef next_block);
LLVMValueRef gencontext_emit_memclear_size_align(GenContext *context, LLVMValueRef ref, uint64_t size, unsigned align, bool bitcast);
LLVMValueRef gencontext_emit_memclear(GenContext *context, LLVMValueRef ref, Type *type);
LLVMValueRef llvm_get_next_param(GenContext *context, unsigned *index);
LLVMTypeRef llvm_get_coerce_type(GenContext *c, ABIArgInfo *arg_info);
static inline LLVMBasicBlockRef llvm_get_current_block_if_in_use(GenContext *context);
LLVMMetadataRef llvm_get_debug_type(GenContext *c, Type *type);
static inline LLVMTypeRef llvm_get_ptr_type(GenContext *c, Type *type);
LLVMTypeRef llvm_get_type(GenContext *c, Type *any_type);
static inline LLVMValueRef llvm_get_zero(GenContext *c, Type *type);
void gencontext_emit_br(GenContext *context, LLVMBasicBlockRef next_block);
bool gencontext_check_block_branch_emit(GenContext *context);
void gencontext_emit_cond_br(GenContext *context, LLVMValueRef value, LLVMBasicBlockRef thenBlock, LLVMBasicBlockRef elseBlock);
static inline LLVMBasicBlockRef gencontext_create_free_block(GenContext *context, const char *name)
void llvm_debug_scope_push(GenContext *context, LLVMMetadataRef debug_scope);
void llvm_debug_scope_pop(GenContext *context);
void llvm_debug_push_lexical_scope(GenContext *context, SourceSpan location);
LLVMMetadataRef llvm_debug_current_scope(GenContext *context);
void c_abi_func_create(GenContext *context, FunctionSignature *signature);
bool llvm_emit_check_block_branch(GenContext *context);
unsigned llvm_store_size(LLVMTypeRef type);
void llvm_store_bevalue(GenContext *c, BEValue *destination, BEValue *value);
void llvm_store_bevalue_raw(GenContext *c, BEValue *destination, LLVMValueRef raw_value);
void llvm_store_bevalue_dest_aligned(GenContext *c, LLVMValueRef destination, BEValue *value);
void llvm_store_bevalue_aligned(GenContext *c, LLVMValueRef destination, BEValue *value, unsigned alignment);
void llvm_store_self_aligned(GenContext *context, LLVMValueRef pointer, LLVMValueRef value, Type *type);
void llvm_store_aligned(GenContext *context, LLVMValueRef pointer, LLVMValueRef value, unsigned alignment);
void llvm_store_aligned_decl(GenContext *context, Decl *decl, LLVMValueRef value);
LLVMTypeRef gencontext_get_twostruct(GenContext *context, LLVMTypeRef lo, LLVMTypeRef hi);
LLVMValueRef gencontext_emit_convert_value_to_coerced(GenContext *context, LLVMTypeRef coerced, LLVMValueRef value, Type *original_type);
static inline LLVMValueRef gencontext_emit_load(GenContext *c, Type *type, LLVMValueRef value)
{
return LLVMCreateBasicBlockInContext(context->context, name);
assert(llvm_get_type(c, type) == LLVMGetElementType(LLVMTypeOf(value)));
return LLVMBuildLoad2(c->builder, llvm_get_type(c, type), value, "");
}
static inline bool block_in_use(LLVMBasicBlockRef block)
static inline LLVMValueRef decl_failable_ref(Decl *decl)
{
return LLVMGetFirstUse(LLVMBasicBlockAsValue(block)) != NULL;
assert(decl->decl_kind == DECL_VAR);
if (decl->var.kind == VARDECL_ALIAS) return decl_failable_ref(decl->var.alias);
if (!decl->var.failable) return NULL;
return decl->var.failable_ref;
}
static inline LLVMBasicBlockRef gencontext_current_block_if_in_use(GenContext *context)
static inline LLVMValueRef decl_ref(Decl *decl)
{
if (decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_ALIAS) return decl_ref(decl->var.alias);
return decl->backend_ref;
}
static inline LLVMValueRef llvm_emit_store(GenContext *context, Decl *decl, LLVMValueRef value)
{
return LLVMBuildStore(context->builder, value, decl_ref(decl));
}
static inline LLVMValueRef llvm_emit_bitcast(GenContext *context, LLVMValueRef value, Type *type)
{
return LLVMBuildBitCast(context->builder, value, llvm_get_type(context, type), "");
}
static inline bool llvm_use_debug(GenContext *context) { return context->debug.builder != NULL; }
static inline LLVMBasicBlockRef llvm_get_current_block_if_in_use(GenContext *context)
{
LLVMBasicBlockRef block = context->current_block;
if (!LLVMGetFirstInstruction(block) && !LLVMGetFirstUse(LLVMBasicBlockAsValue(block)))
@@ -163,64 +331,6 @@ static inline LLVMBasicBlockRef gencontext_current_block_if_in_use(GenContext *c
return block;
}
#define PUSH_ERROR() \
LLVMBasicBlockRef _old_catch = context->catch_block; \
LLVMValueRef _old_error_var = context->error_var
#define POP_ERROR() \
context->catch_block = _old_catch; \
context->error_var = _old_error_var
void gencontext_emit_function_body(GenContext *context, Decl *decl);
void gencontext_emit_implicit_return(GenContext *context);
void gencontext_emit_function_decl(GenContext *context, Decl *decl);
void gencontext_emit_extern_decl(GenContext *context, Decl *decl);
LLVMValueRef gencontext_emit_address(GenContext *context, Expr *expr);
LLVMTypeRef gencontext_get_llvm_type(GenContext *context, Type *type);
static inline LLVMValueRef gencontext_emit_load(GenContext *context, Type *type, LLVMValueRef value)
{
assert(gencontext_get_llvm_type(context, type) == LLVMGetElementType(LLVMTypeOf(value)));
return LLVMBuildLoad2(context->builder, gencontext_get_llvm_type(context, type), value, "");
}
static inline void gencontext_emit_return_value(GenContext *context, LLVMValueRef value)
{
LLVMBuildRet(context->builder, value);
context->current_block = NULL;
context->current_block_is_target = false;
}
static inline LLVMValueRef decl_failable_ref(Decl *decl)
{
assert(decl->decl_kind == DECL_VAR);
if (decl->var.kind == VARDECL_ALIAS) return decl_failable_ref(decl->var.alias);
return decl->var.failable_ref;
}
static inline LLVMValueRef decl_ref(Decl *decl)
{
if (decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_ALIAS) return decl_ref(decl->var.alias);
return decl->backend_ref;
}
static inline void gencontext_emit_store(GenContext *context, Decl *decl, LLVMValueRef value)
{
LLVMBuildStore(context->builder, value, decl_ref(decl));
}
LLVMValueRef gencontext_emit_cast(GenContext *context, CastKind cast_kind, LLVMValueRef value, Type *to_type, Type *from_type);
static inline bool gencontext_func_pass_return_by_param(GenContext *context, Type *first_param_type) { return false; };
static inline bool gencontext_func_pass_param_by_reference(GenContext *context, Type *param_type) { return false; }
static inline LLVMValueRef gencontext_emit_bitcast(GenContext *context, LLVMValueRef value, Type *type)
{
return LLVMBuildBitCast(context->builder, value, gencontext_get_llvm_type(context, type), "");
}
static inline bool gencontext_use_debug(GenContext *context)
{
return context->debug.builder != NULL;
}
static inline bool call_supports_variadic(CallABI abi)
{
switch (abi)
@@ -282,21 +392,37 @@ static inline LLVMCallConv llvm_call_convention_from_call(CallABI abi)
}
#define llvm_type(type) gencontext_get_llvm_type(context, type)
#define llvm_debug_type(type) gencontext_get_debug_type(context, type)
static inline LLVMValueRef gencontext_emit_no_error_union(GenContext *context)
static inline LLVMTypeRef llvm_get_ptr_type(GenContext *c, Type *type)
{
return LLVMConstInt(llvm_type(type_error), 0, false);
return llvm_get_type(c, type_get_ptr(type));
}
static inline LLVMValueRef gencontext_emit_const_int(GenContext *context, Type *type, uint64_t val)
static inline LLVMValueRef llvm_get_zero(GenContext *c, Type *type)
{
return LLVMConstNull(llvm_get_type(c, type));
}
static inline LLVMValueRef llvm_const_int(GenContext *c, Type *type, uint64_t val)
{
type = type->canonical;
if (type == type_error) type = type_usize->canonical;
assert(type_is_any_integer(type) || type->type_kind == TYPE_BOOL);
return LLVMConstInt(llvm_type(type), val, type_is_signed_integer(type));
return LLVMConstInt(llvm_get_type(c, type), val, type_is_integer_signed(type));
}
#define llvm_int(_type, _val) gencontext_emit_const_int(context, _type, _val)
LLVMValueRef gencontext_emit_typeid(GenContext *context, Expr *expr);
#define EMIT_LOC(c, x) do { if (c->debug.builder) llvm_emit_debug_location(c, x->span); } while (0);
#define PUSH_ERROR() \
LLVMBasicBlockRef _old_catch = c->catch_block; \
LLVMValueRef _old_error_var = c->error_var
#define POP_ERROR() \
c->catch_block = _old_catch; \
c->error_var = _old_error_var
static inline bool abi_info_should_flatten(ABIArgInfo *info);
static inline bool abi_info_should_flatten(ABIArgInfo *info)
{
return info->kind == ABI_ARG_DIRECT_COERCE && info->direct_coerce.elements > 1U && !info->direct_coerce.prevent_flatten;
}

View File

@@ -5,38 +5,6 @@
#include "llvm_codegen_internal.h"
static inline LLVMTypeRef gencontext_create_basic_llvm_type(GenContext *context, Type *type)
{
switch (type->type_kind)
{
case TYPE_TYPEID:
return LLVMIntTypeInContext(context->context, type->builtin.bitsize);
case TYPE_BOOL:
return LLVMInt1TypeInContext(context->context);
case TYPE_I8:
case TYPE_U8:
return LLVMInt8TypeInContext(context->context);
case TYPE_I16:
case TYPE_U16:
return LLVMInt16TypeInContext(context->context);
case TYPE_I32:
case TYPE_U32:
return LLVMInt32TypeInContext(context->context);
case TYPE_I64:
case TYPE_U64:
return LLVMInt64TypeInContext(context->context);
case TYPE_F32:
return LLVMFloatTypeInContext(context->context);
case TYPE_F64:
return LLVMDoubleTypeInContext(context->context);
case TYPE_VOID:
return LLVMVoidTypeInContext(context->context);
default:
UNREACHABLE
}
}
void gencontext_begin_module(GenContext *context)
{
assert(!context->module && "Expected no module");
@@ -51,6 +19,8 @@ void gencontext_begin_module(GenContext *context)
{
const char *filename = context->ast_context->file->name;
const char *dir_path = context->ast_context->file->dir_path;
// Set runtime version here.
context->debug.runtime_version = 1;
context->debug.builder = LLVMCreateDIBuilder(context->module);
context->debug.file = LLVMDIBuilderCreateFile(context->debug.builder, filename, strlen(filename), dir_path, strlen(dir_path));
@@ -82,7 +52,7 @@ void gencontext_begin_module(GenContext *context)
void gencontext_end_module(GenContext *context)
{
if (context->debug.builder)
if (llvm_use_debug(context))
{
LLVMDIBuilderFinalize(context->debug.builder);
}

File diff suppressed because it is too large Load Diff

View File

@@ -4,9 +4,8 @@
#include "llvm_codegen_internal.h"
LLVMTypeRef llvm_get_type(LLVMContextRef context, Type *any_type);
static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl)
static inline LLVMTypeRef llvm_type_from_decl(GenContext *context, Decl *decl)
{
static LLVMTypeRef params[MAX_PARAMS];
switch (decl->decl_kind)
@@ -34,7 +33,7 @@ static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl
case DECL_STRUCT:
{
LLVMTypeRef *types = NULL;
LLVMTypeRef type = LLVMStructCreateNamed(context, decl->external_name);
LLVMTypeRef type = LLVMStructCreateNamed(context->context, decl->external_name);
// Avoid recursive issues.
decl->type->backend_type = type;
VECEACH(decl->strukt.members, i)
@@ -48,7 +47,7 @@ static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl
{
Decl *max_type = NULL;
unsigned long long max_size = 0;
LLVMTypeRef type = LLVMStructCreateNamed(context, decl->external_name);
LLVMTypeRef type = LLVMStructCreateNamed(context->context, decl->external_name);
// Avoid recursive issues.
decl->type->backend_type = type;
VECEACH(decl->strukt.members, i)
@@ -76,7 +75,7 @@ static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl
return llvm_get_type(context, decl->type);
case DECL_ERR:
{
LLVMTypeRef err_type = LLVMStructCreateNamed(context, decl->external_name);
LLVMTypeRef err_type = LLVMStructCreateNamed(context->context, decl->external_name);
// Avoid recursive issues.
decl->type->backend_type = err_type;
LLVMTypeRef *types = NULL;
@@ -96,7 +95,7 @@ static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl
unsigned padding = type_size(type_error) - size;
if (padding > 0)
{
vec_add(types, LLVMIntTypeInContext(context, padding * 8));
vec_add(types, LLVMIntTypeInContext(context->context, padding * 8));
}
LLVMStructSetBody(err_type, types, vec_size(types), false);
return err_type;
@@ -104,66 +103,190 @@ static inline LLVMTypeRef llvm_type_from_decl(LLVMContextRef context, Decl *decl
}
UNREACHABLE
}
static inline LLVMTypeRef llvm_type_from_ptr(LLVMContextRef context, Type *type)
{
LLVMTypeRef base_llvm_type = llvm_get_type(context, type->pointer);
if (type->canonical != type)
{
return type->backend_type = llvm_get_type(context, type->canonical);
}
return type->backend_type = LLVMPointerType(base_llvm_type, /** TODO **/0);
}
static inline LLVMTypeRef llvm_type_from_array(LLVMContextRef context, Type *type)
static inline LLVMTypeRef llvm_type_from_ptr(GenContext *context, Type *type)
{
if (type->canonical != type)
{
return type->backend_type = llvm_get_type(context, type->canonical);
}
LLVMTypeRef base_llvm_type = llvm_get_type(context, type->array.base);
return type->backend_type = LLVMArrayType(base_llvm_type, type->array.len);
return type->backend_type = LLVMPointerType(llvm_get_type(context, type->pointer), /** TODO **/0);
}
LLVMTypeRef llvm_func_type(LLVMContextRef context, Type *type)
static inline LLVMTypeRef llvm_type_from_array(GenContext *context, Type *type)
{
if (type->canonical != type)
{
return type->backend_type = llvm_get_type(context, type->canonical);
}
return type->backend_type = LLVMArrayType(llvm_get_type(context, type->array.base), type->array.len);
}
static void param_expand(GenContext *context, LLVMTypeRef** params_ref, Type *type)
{
switch (type->type_kind)
{
case TYPE_TYPEDEF:
UNREACHABLE
case TYPE_ARRAY:
for (size_t i = type->array.len; i > 0; i--)
{
param_expand(context, params_ref, type->array.base);
}
return;
case TYPE_STRUCT:
{
Decl **members = type->decl->strukt.members;
VECEACH(members, i)
{
param_expand(context, params_ref, members[i]->type);
}
return;
}
case TYPE_ENUM:
param_expand(context, params_ref, type_lowering(type));
return;
case TYPE_ERR_UNION:
param_expand(context, params_ref, type_usize->canonical);
param_expand(context, params_ref, type_usize->canonical);
return;
case TYPE_ERRTYPE:
// TODO
param_expand(context, params_ref, type_usize->canonical);
return;
case TYPE_UNION:
{
size_t largest = 0;
Type *largest_type = NULL;
Decl **members = type->decl->strukt.members;
VECEACH(members, i)
{
if (type_size(type) > largest)
{
largest = type_size(type);
type = type->canonical;
}
}
if (!largest) return;
param_expand(context, params_ref, largest_type);
return;
}
default:
// Type complex: return 2;
vec_add(*params_ref, llvm_get_type(context, type));
return;
}
}
static inline void add_func_type_param(GenContext *context, Type *param_type, ABIArgInfo *arg_info, LLVMTypeRef **params)
{
arg_info->param_index_start = vec_size(*params);
switch (arg_info->kind)
{
case ABI_ARG_IGNORE:
break;
case ABI_ARG_INDIRECT:
vec_add(*params, llvm_get_ptr_type(context, param_type));
break;
case ABI_ARG_EXPAND:
// Expanding a structs
param_expand(context, params, param_type->canonical);
// If we have padding, add it here.
if (arg_info->expand.padding_type)
{
vec_add(*params, llvm_get_type(context, arg_info->expand.padding_type));
}
break;
case ABI_ARG_DIRECT_COERCE:
{
// Normal direct.
if (!arg_info->direct_coerce.type)
{
vec_add(*params, llvm_get_type(context, param_type));
break;
}
LLVMTypeRef coerce_type = llvm_abi_type(context, arg_info->direct_coerce.type);
if (!abi_info_should_flatten(arg_info))
{
vec_add(*params, coerce_type);
break;
}
for (unsigned idx = 0; idx < arg_info->direct_coerce.elements; idx++)
{
vec_add(*params, coerce_type);
}
break;
}
case ABI_ARG_DIRECT_PAIR:
// Pairs are passed by param.
vec_add(*params, llvm_abi_type(context, arg_info->direct_pair.lo));
vec_add(*params, llvm_abi_type(context, arg_info->direct_pair.hi));
break;
}
arg_info->param_index_end = vec_size(*params);
}
LLVMTypeRef llvm_func_type(GenContext *context, Type *type)
{
LLVMTypeRef *params = NULL;
FunctionSignature *signature = type->func.signature;
unsigned parameters = vec_size(signature->params);
if (signature->return_param) parameters++;
if (parameters)
c_abi_func_create(context, signature);
LLVMTypeRef return_type = NULL;
Type *real_return_type = signature->failable ? type_error : signature->rtype->type->canonical;
ABIArgInfo *ret_arg_info = signature->failable ? signature->failable_abi_info : signature->ret_abi_info;
ret_arg_info->param_index_end = 0;
ret_arg_info->param_index_start = 0;
switch (ret_arg_info->kind)
{
params = malloc_arena(sizeof(LLVMTypeRef) * parameters);
unsigned index = 0;
if (signature->return_param)
case ABI_ARG_EXPAND:
UNREACHABLE;
case ABI_ARG_INDIRECT:
vec_add(params, llvm_get_ptr_type(context, real_return_type));
FALLTHROUGH;
case ABI_ARG_IGNORE:
return_type = llvm_get_type(context, type_void);
break;
case ABI_ARG_DIRECT_PAIR:
{
params[index++] = llvm_get_type(context, type_get_ptr(signature->rtype->type));
}
VECEACH(signature->params, i)
{
params[index++] = llvm_get_type(context, signature->params[i]->type->canonical);
LLVMTypeRef lo = llvm_abi_type(context, ret_arg_info->direct_pair.lo);
LLVMTypeRef hi = llvm_abi_type(context, ret_arg_info->direct_pair.hi);
return_type = gencontext_get_twostruct(context, lo, hi);
break;
}
case ABI_ARG_DIRECT_COERCE:
assert(!abi_info_should_flatten(ret_arg_info));
return_type = llvm_get_coerce_type(context, ret_arg_info) ?: llvm_get_type(context, real_return_type);
break;
}
LLVMTypeRef ret_type;
if (signature->failable)
// If it's failable and it's not void (meaning ret_abi_info will be NULL)
if (signature->failable && signature->ret_abi_info)
{
ret_type = llvm_get_type(context, type_error);
add_func_type_param(context, type_get_ptr(signature->rtype->type), signature->ret_abi_info, &params);
}
else
// Add in all of the required arguments.
VECEACH(signature->params, i)
{
ret_type = signature->return_param
? llvm_get_type(context, type_void)
: llvm_get_type(context, type->func.signature->rtype->type);
add_func_type_param(context, signature->params[i]->type, signature->params[i]->var.abi_info, &params);
}
return LLVMFunctionType(ret_type, params, parameters, signature->variadic);
return LLVMFunctionType(return_type, params, vec_size(params), signature->variadic);
}
LLVMTypeRef llvm_get_type(LLVMContextRef context, Type *any_type)
LLVMTypeRef llvm_get_type(GenContext *c, Type *any_type)
{
if (any_type->backend_type && LLVMGetTypeContext(any_type->backend_type) == context)
if (any_type->backend_type && LLVMGetTypeContext(any_type->backend_type) == c->context)
{
return any_type->backend_type;
}
@@ -175,66 +298,104 @@ LLVMTypeRef llvm_get_type(LLVMContextRef context, Type *any_type)
case TYPE_MEMBER:
UNREACHABLE
case TYPE_TYPEID:
return any_type->backend_type = LLVMIntTypeInContext(context, any_type->builtin.bitsize);
return any_type->backend_type = LLVMIntTypeInContext(c->context, any_type->builtin.bitsize);
case TYPE_TYPEDEF:
return any_type->backend_type = llvm_get_type(context, any_type->canonical);
return any_type->backend_type = llvm_get_type(c, any_type->canonical);
case TYPE_ENUM:
return any_type->backend_type = llvm_get_type(context, any_type->decl->enums.type_info->type->canonical);
return any_type->backend_type = llvm_get_type(c, any_type->decl->enums.type_info->type->canonical);
case TYPE_ERR_UNION:
return any_type->backend_type = LLVMIntTypeInContext(context, any_type->builtin.bitsize);
{
LLVMTypeRef elements[2] = { llvm_get_type(c, type_usize->canonical), llvm_get_type(c, type_usize->canonical) };
return any_type->backend_type = LLVMStructTypeInContext(c->context, elements, 2, false);
}
case TYPE_STRUCT:
case TYPE_UNION:
case TYPE_ERRTYPE:
return any_type->backend_type = llvm_type_from_decl(context, any_type->decl);
return any_type->backend_type = llvm_type_from_decl(c, any_type->decl);
case TYPE_FUNC:
return any_type->backend_type = llvm_func_type(context, any_type);
return any_type->backend_type = llvm_func_type(c, any_type);
case TYPE_VOID:
return any_type->backend_type = LLVMVoidTypeInContext(context);
return any_type->backend_type = LLVMVoidTypeInContext(c->context);
case TYPE_F64:
case TYPE_FXX:
return any_type->backend_type = LLVMDoubleTypeInContext(context);
return any_type->backend_type = LLVMDoubleTypeInContext(c->context);
case TYPE_F16:
return any_type->backend_type = LLVMHalfTypeInContext(c->context);
case TYPE_F32:
return any_type->backend_type = LLVMFloatTypeInContext(context);
case TYPE_U64:
case TYPE_I64:
return any_type->backend_type = LLVMIntTypeInContext(context, 64U);
case TYPE_U32:
case TYPE_I32:
return any_type->backend_type = LLVMFloatTypeInContext(c->context);
case TYPE_F128:
return any_type->backend_type = LLVMFP128TypeInContext(c->context);
case ALL_SIGNED_INTS:
case ALL_UNSIGNED_INTS:
return any_type->backend_type = LLVMIntTypeInContext(c->context, any_type->builtin.bitsize);
case TYPE_IXX:
return any_type->backend_type = LLVMIntTypeInContext(context, 32U);
case TYPE_U16:
case TYPE_I16:
return any_type->backend_type = LLVMIntTypeInContext(context, 16U);
case TYPE_U8:
case TYPE_I8:
return any_type->backend_type = LLVMIntTypeInContext(context, 8U);
return any_type->backend_type = LLVMIntTypeInContext(c->context, 32U);
case TYPE_BOOL:
return any_type->backend_type = LLVMIntTypeInContext(context, 1U);
return any_type->backend_type = LLVMIntTypeInContext(c->context, 8U);
case TYPE_POINTER:
return any_type->backend_type = llvm_type_from_ptr(context, any_type);
return any_type->backend_type = llvm_type_from_ptr(c, any_type);
case TYPE_STRING:
// TODO
return any_type->backend_type = LLVMPointerType(llvm_get_type(context, type_char), 0);
return any_type->backend_type = LLVMPointerType(llvm_get_type(c, type_char), 0);
case TYPE_ARRAY:
return any_type->backend_type = llvm_type_from_array(context, any_type);
return any_type->backend_type = llvm_type_from_array(c, any_type);
case TYPE_SUBARRAY:
{
LLVMTypeRef base_type = llvm_get_type(context, type_get_ptr(any_type->array.base));
LLVMTypeRef size_type = llvm_get_type(context, type_usize);
LLVMTypeRef array_type = LLVMStructCreateNamed(context, any_type->name);
LLVMTypeRef base_type = llvm_get_type(c, type_get_ptr(any_type->array.base));
LLVMTypeRef size_type = llvm_get_type(c, type_usize);
LLVMTypeRef array_type = LLVMStructCreateNamed(c->context, any_type->name);
LLVMTypeRef types[2] = { base_type, size_type };
LLVMStructSetBody(array_type, types, 2, false);
return any_type->backend_type = array_type;
}
case TYPE_VARARRAY:
return any_type->backend_type = llvm_get_type(context, type_get_ptr(any_type->array.base));
return any_type->backend_type = llvm_get_type(c, type_get_ptr(any_type->array.base));
case TYPE_VECTOR:
return any_type->backend_type = LLVMVectorType(llvm_get_type(c, any_type->vector.base), any_type->vector.len);
case TYPE_COMPLEX:
return any_type->backend_type = gencontext_get_twostruct(c, llvm_get_type(c, any_type->complex), llvm_get_type(c, any_type->complex));
}
UNREACHABLE;
}
LLVMTypeRef gencontext_get_llvm_type(GenContext *context, Type *type)
LLVMTypeRef llvm_get_coerce_type(GenContext *c, ABIArgInfo *arg_info)
{
// gencontext_get_debug_type(context, type);
return llvm_get_type(context->context, type);
if (arg_info->kind == ABI_ARG_DIRECT_COERCE)
{
if (!arg_info->direct_coerce.type) return NULL;
LLVMTypeRef coerce_type = llvm_abi_type(c, arg_info->direct_coerce.type);
if (arg_info->direct_coerce.elements < 2U) return coerce_type;
LLVMTypeRef *refs = MALLOC(sizeof(LLVMValueRef) * arg_info->direct_coerce.elements);
for (unsigned i = 0; i < arg_info->direct_coerce.elements; i++)
{
refs[i] = coerce_type;
}
return LLVMStructTypeInContext(c->context, refs, arg_info->direct_coerce.elements, false);
}
if (arg_info->kind == ABI_ARG_DIRECT_PAIR)
{
LLVMTypeRef lo = llvm_abi_type(c, arg_info->direct_pair.lo);
LLVMTypeRef hi = llvm_abi_type(c, arg_info->direct_pair.hi);
return gencontext_get_twostruct(c, lo, hi);
}
UNREACHABLE
}
LLVMTypeRef gencontext_get_twostruct(GenContext *context, LLVMTypeRef lo, LLVMTypeRef hi)
{
LLVMTypeRef types[2] = { lo, hi };
return LLVMStructTypeInContext(context->context, types, 2, false);
}
LLVMTypeRef llvm_abi_type(GenContext *c, AbiType *type)
{
switch (type->kind)
{
case ABI_TYPE_PLAIN:
return llvm_get_type(c, type->type);
case ABI_TYPE_INT_BITS:
return LLVMIntTypeInContext(c->context, type->int_bits);
}
UNREACHABLE
}

View File

@@ -452,6 +452,15 @@ static Expr *parse_ct_ident(Context *context, Expr *left)
return expr;
}
static Expr *parse_hash_ident(Context *context, Expr *left)
{
assert(!left && "Unexpected left hand side");
Expr *expr = EXPR_NEW_TOKEN(EXPR_HASH_IDENT, context->tok);
expr->ct_ident_expr.identifier = TOKSTR(context->tok);
advance(context);
return expr;
}
static Expr *parse_identifier(Context *context, Expr *left)
{
assert(!left && "Unexpected left hand side");
@@ -868,6 +877,7 @@ static Expr* parse_expr_block(Context *context, Expr *left)
if (!ast_ok(stmt)) return poisoned_expr;
vec_add(expr->expr_block.stmts, stmt);
}
RANGE_EXTEND_PREV(expr);
return expr;
}
@@ -956,6 +966,7 @@ ParseRule rules[TOKEN_EOF + 1] = {
[TOKEN_CONST_IDENT] = { parse_identifier, NULL, PREC_NONE },
[TOKEN_CT_CONST_IDENT] = { parse_ct_ident, NULL, PREC_NONE },
[TOKEN_CT_TYPE_IDENT] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_HASH_IDENT] = { parse_hash_ident, NULL, PREC_NONE },
//[TOKEN_HASH_TYPE_IDENT] = { parse_type_identifier(, NULL, PREC_NONE }
};

View File

@@ -1433,6 +1433,9 @@ static inline Decl *parse_typedef_declaration(Context *context, Visibility visib
return decl;
}
/**
* macro ::= MACRO type? identifier '(' macro_params ')' compound_statement
*/
static inline Decl *parse_macro_declaration(Context *context, Visibility visibility)
{
advance_and_verify(context, TOKEN_MACRO);
@@ -1458,27 +1461,39 @@ static inline Decl *parse_macro_declaration(Context *context, Visibility visibil
TEST_TYPE:
switch (context->tok.type)
{
// normal foo
case TOKEN_IDENT:
param_kind = VARDECL_PARAM;
break;
// ct_var $foo
case TOKEN_CT_IDENT:
param_kind = VARDECL_PARAM_CT;
break;
case TOKEN_AND:
// reference &foo
case TOKEN_AMP:
advance(context);
if (!TOKEN_IS(TOKEN_IDENT))
{
SEMA_TOKEN_ERROR(context->tok, "Only normal variables may be passed by reference.");
return poisoned_decl;
}
param_kind = VARDECL_PARAM_REF;
break;
// #Foo (not allowed)
case TOKEN_HASH_TYPE_IDENT:
SEMA_TOKEN_ERROR(context->tok, "An unevaluated expression can never be a type, did you mean to use $Type?");
return poisoned_decl;
// expression #foo
case TOKEN_HASH_IDENT:
// Note that the HASH_TYPE_IDENT will be an error later on.
param_kind = VARDECL_PARAM_EXPR;
break;
case TOKEN_HASH_TYPE_IDENT:
param_kind = VARDECL_PARAM_EXPR;
// Compile time type $Type
case TOKEN_CT_TYPE_IDENT:
param_kind = VARDECL_PARAM_CT_TYPE;
break;
case TOKEN_ELLIPSIS:
// varargs
TODO
default:
if (parm_type)

View File

@@ -7,23 +7,22 @@
#define EXIT_T_MISMATCH() return sema_type_mismatch(context, left, canonical, cast_type)
#define IS_EXPLICT()
#define RETURN_NON_CONST_CAST(kind) do { if (left->expr_kind != EXPR_CONST) { insert_cast(left, kind, canonical); return true; } } while (0)
#define RETURN_NON_CONST_CAST(kind) do { if (left->expr_kind != EXPR_CONST) { insert_cast(left, kind, type); return true; } } while (0)
#define REQUIRE_EXPLICIT_CAST(_cast_type)\
do { if (_cast_type == CAST_TYPE_EXPLICIT) break;\
if (_cast_type == CAST_TYPE_OPTIONAL_IMPLICIT) return true;\
EXIT_T_MISMATCH(); } while (0)
static inline void insert_cast(Expr *expr, CastKind kind, Type *canonical)
static inline void insert_cast(Expr *expr, CastKind kind, Type *type)
{
assert(expr->resolve_status == RESOLVE_DONE);
assert(expr->type);
assert(canonical->canonical == canonical);
Expr *inner = COPY(expr);
expr->expr_kind = EXPR_CAST;
expr->cast_expr.kind = kind;
expr->cast_expr.expr = inner;
expr->cast_expr.type_info = NULL;
expr->type = canonical;
expr->type = type;
}
static bool sema_type_mismatch(Context *context, Expr *expr, Type *type, CastType cast_type)
@@ -306,6 +305,11 @@ bool fpxi(Context *context, Expr *left, Type *canonical, Type *type, CastType ca
*/
bool ixxxi(Context *context, Expr *left, Type *canonical, Type *type, CastType cast_type)
{
if (left->expr_kind != EXPR_CONST)
{
SEMA_ERROR(left, "This expression could not be resolved to a concrete type. Please add more type annotations.");
return false;
}
bool is_signed = canonical->type_kind < TYPE_U8;
int bitsize = canonical->builtin.bitsize;
if (!is_signed && bigint_cmp_zero(&left->const_expr.i) == CMP_LT)
@@ -578,7 +582,7 @@ bool enfp(Context *context, Expr* left, Type *from, Type *canonical, Type *type,
REQUIRE_EXPLICIT_CAST(cast_type);
Type *enum_type = from->decl->enums.type_info->type;
Type *enum_type_canonical = enum_type->canonical;
if (type_is_unsigned_integer(enum_type_canonical))
if (type_is_integer_unsigned(enum_type_canonical))
{
return uifp(context, left, enum_type_canonical, type);
}
@@ -742,22 +746,16 @@ CastKind cast_to_bool_kind(Type *type)
return CAST_ERROR;
case TYPE_BOOL:
UNREACHABLE
case TYPE_IXX:
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
case ALL_INTS:
return CAST_INTBOOL;
case TYPE_F32:
case TYPE_F64:
case TYPE_FXX:
case TYPE_COMPLEX:
return CAST_CXBOOL;
case ALL_FLOATS:
return CAST_FPBOOL;
case TYPE_POINTER:
return CAST_PTRBOOL;
case TYPE_VECTOR:
return CAST_ERROR;
}
UNREACHABLE
}
@@ -787,35 +785,40 @@ bool cast(Context *context, Expr *expr, Type *to_type, CastType cast_type)
break;
case TYPE_IXX:
// Compile time integers may convert into ints, floats, bools
if (expr->expr_kind != EXPR_CONST && !expr->reeval)
{
expr->resolve_status = RESOLVE_NOT_DONE;
expr->reeval = true;
return sema_analyse_expr(context, to_type, expr);
}
if (type_is_integer(canonical)) return ixxxi(context, expr, canonical, to_type, cast_type);
if (type_is_float(canonical)) return ixxfp(context, expr, canonical, to_type, cast_type);
if (canonical == type_bool) return ixxbo(context, expr, to_type);
if (canonical->type_kind == TYPE_POINTER) return xipt(context, expr, from_type, canonical, to_type, cast_type);
if (canonical->type_kind == TYPE_ENUM) return ixxen(context, expr, canonical, to_type, cast_type);
break;
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
if (type_is_unsigned_integer(canonical)) return siui(context, expr, canonical, to_type, cast_type);
if (type_is_signed_integer(canonical)) return sisi(context, expr, from_type, canonical, to_type, cast_type);
case ALL_SIGNED_INTS:
if (type_is_integer_unsigned(canonical)) return siui(context, expr, canonical, to_type, cast_type);
if (type_is_integer_signed(canonical)) return sisi(context, expr, from_type, canonical, to_type, cast_type);
if (type_is_float(canonical)) return sifp(context, expr, canonical, to_type);
if (canonical == type_bool) return xibo(context, expr, canonical, to_type, cast_type);
if (canonical->type_kind == TYPE_POINTER) return xipt(context, expr, from_type, canonical, to_type, cast_type);
break;
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
if (type_is_unsigned_integer(canonical)) return uiui(context, expr, from_type, canonical, to_type, cast_type);
if (type_is_signed_integer(canonical)) return uisi(context, expr, from_type, canonical, to_type, cast_type);
case ALL_UNSIGNED_INTS:
if (type_is_integer_unsigned(canonical)) return uiui(context, expr, from_type, canonical, to_type, cast_type);
if (type_is_integer_signed(canonical)) return uisi(context, expr, from_type, canonical, to_type, cast_type);
if (type_is_float(canonical)) return uifp(context, expr, canonical, to_type);
if (canonical == type_bool) return xibo(context, expr, canonical, to_type, cast_type);
if (canonical->type_kind == TYPE_POINTER) return xipt(context, expr, from_type, canonical, to_type, cast_type);
break;
case TYPE_F32:
case TYPE_F64:
case TYPE_FXX:
case ALL_FLOATS:
// Compile time integers may convert into ints, floats, bools
if (from_type->type_kind == TYPE_FXX && expr->expr_kind != EXPR_CONST && !expr->reeval)
{
expr->resolve_status = RESOLVE_NOT_DONE;
expr->reeval = true;
return sema_analyse_expr(context, to_type, expr);
}
if (type_is_integer(canonical)) return fpxi(context, expr, canonical, to_type, cast_type);
if (canonical == type_bool) return fpbo(context, expr, canonical, to_type, cast_type);
if (type_is_float(canonical)) return fpfp(context, expr, from_type, canonical, to_type, cast_type);
@@ -863,6 +866,10 @@ bool cast(Context *context, Expr *expr, Type *to_type, CastType cast_type)
case TYPE_SUBARRAY:
if (canonical->type_kind == TYPE_POINTER) return sapt(context, expr, from_type, canonical, to_type, cast_type);
break;
case TYPE_VECTOR:
TODO
case TYPE_COMPLEX:
TODO
}
if (cast_type == CAST_TYPE_OPTIONAL_IMPLICIT) return true;
return sema_type_mismatch(context, expr, canonical, cast_type);

View File

@@ -8,51 +8,9 @@
static inline void sema_set_struct_size(Decl *decl)
{
// TODO packed
uint64_t size = 0;
uint64_t alignment = 0;
VECEACH(decl->strukt.members, i)
{
Decl *member = decl->strukt.members[i];
Type *canonical = member->type->canonical;
uint64_t member_size = type_size(canonical);
uint64_t member_alignment = type_abi_alignment(canonical);
assert(member_size > 0);
// Add padding.
if (member_alignment && (size % member_alignment))
{
size += member_alignment - size % member_alignment;
}
// Add size.
size += member_size;
if (member_alignment > alignment) alignment = member_alignment;
}
decl->strukt.abi_alignment = alignment;
if (alignment && size % alignment)
{
size += alignment - size % alignment;
}
decl->strukt.size = size;
}
static inline void sema_set_union_size(Decl *decl)
{
uint64_t size = 0;
uint64_t alignment = 0;
VECEACH(decl->strukt.members, i)
{
Decl *member = decl->strukt.members[i];
Type *canonical = member->type->canonical;
uint64_t member_size = type_size(canonical);
uint64_t member_alignment = type_abi_alignment(canonical);
if (member_size > size) size = member_size;
if (member_alignment > alignment) alignment = member_alignment;
}
decl->strukt.abi_alignment = alignment;
decl->strukt.size = size;
}
static AttributeType sema_analyse_attribute(Context *context, Attr *attr, AttributeDomain domain);
static bool sema_analyse_struct_union(Context *context, Decl *decl);
@@ -88,8 +46,64 @@ static inline bool sema_analyse_struct_member(Context *context, Decl *decl)
static bool sema_analyse_struct_union(Context *context, Decl *decl)
{
AttributeDomain domain;
switch (decl->decl_kind)
{
case DECL_STRUCT:
domain = ATTR_STRUCT;
break;
case DECL_UNION:
domain = ATTR_UNION;
break;
case DECL_ERR:
domain = ATTR_ERROR;
break;
default:
UNREACHABLE
}
VECEACH(decl->attributes, i)
{
Attr *attr = decl->attributes[i];
AttributeType attribute = sema_analyse_attribute(context, attr, domain);
if (attribute == ATTRIBUTE_NONE) return decl_poison(decl);
bool had = false;
#define SET_ATTR(_X) had = decl->func._X; decl->func._X = true; break
switch (attribute)
{
case ATTRIBUTE_CNAME:
had = decl->cname != NULL;
decl->cname = attr->expr->const_expr.string.chars;
break;
case ATTRIBUTE_SECTION:
had = decl->section != NULL;
decl->section = attr->expr->const_expr.string.chars;
break;
case ATTRIBUTE_ALIGN:
had = decl->alignment != 0;
decl->alignment = attr->alignment;
break;
case ATTRIBUTE_PACKED:
had = decl->is_packed;
decl->is_packed = true;
break;
default:
UNREACHABLE
}
#undef SET_ATTR
if (had)
{
SEMA_TOKID_ERROR(attr->name, "Attribute occurred twice, please remove one.");
return decl_poison(decl);
}
}
DEBUG_LOG("Beginning analysis of %s.", decl->name ? decl->name : "anon");
assert(decl->decl_kind == DECL_STRUCT || decl->decl_kind == DECL_UNION);
size_t offset = 0;
// Default alignment is 1.
size_t alignment = 1;
size_t size = 0;
if (decl->name) context_push_scope(context);
VECEACH(decl->strukt.members, i)
{
@@ -106,9 +120,38 @@ static bool sema_analyse_struct_union(Context *context, Decl *decl)
decl_poison(decl);
continue;
}
decl_poison(decl);
continue;
}
size_t member_alignment = type_abi_alignment(member->type);
size_t member_size = type_size(member->type);
if (member_alignment > alignment) alignment = member_alignment;
if (decl->decl_kind == DECL_UNION)
{
if (member_size > size) size = member_size;
member->offset = 0;
}
else
{
if (!decl->is_packed)
{
offset = aligned_offset(offset, member_alignment);
}
member->offset = offset;
offset += member_size;
}
}
if (!decl->alignment) decl->alignment = alignment;
if (decl->decl_kind != DECL_UNION)
{
size = offset;
if (!decl->is_packed)
{
size = aligned_offset(size, decl->alignment);
}
}
decl->strukt.size = size;
DEBUG_LOG("Struct/union size %d, alignment %d.", (int)size, (int)decl->alignment);
if (decl->name) context_pop_scope(context);
DEBUG_LOG("Analysis complete.");
return decl_ok(decl);
@@ -207,17 +250,6 @@ static inline Type *sema_analyse_function_signature(Context *context, FunctionSi
if (!all_ok) return NULL;
Type *return_type = signature->rtype->type->canonical;
signature->return_param = false;
if (return_type->type_kind != TYPE_VOID)
{
// TODO fix this number with ABI compatibility
if (signature->failable || type_size(return_type) > 8 * 2)
{
signature->return_param = true;
}
}
TokenType type = TOKEN_INVALID_TOKEN;
signature->mangled_signature = symtab_add(buffer, buffer_write_offset, fnv1a(buffer, buffer_write_offset), &type);
Type *func_type = stable_get(&context->local_symbols, signature->mangled_signature);
@@ -402,7 +434,7 @@ static AttributeType sema_analyse_attribute(Context *context, Attr *attr, Attrib
[ATTRIBUTE_WEAK] = ATTR_FUNC | ATTR_CONST | ATTR_VAR,
[ATTRIBUTE_CNAME] = 0xFF,
[ATTRIBUTE_SECTION] = ATTR_FUNC | ATTR_CONST | ATTR_VAR,
[ATTRIBUTE_PACKED] = ATTR_STRUCT | ATTR_UNION,
[ATTRIBUTE_PACKED] = ATTR_STRUCT | ATTR_UNION | ATTR_ERROR,
[ATTRIBUTE_NORETURN] = ATTR_FUNC,
[ATTRIBUTE_ALIGN] = ATTR_FUNC | ATTR_CONST | ATTR_VAR | ATTR_STRUCT | ATTR_UNION,
[ATTRIBUTE_INLINE] = ATTR_FUNC,
@@ -552,8 +584,30 @@ static inline bool sema_analyse_macro(Context *context, Decl *decl)
{
Decl *param = decl->macro_decl.parameters[i];
assert(param->decl_kind == DECL_VAR);
assert(param->var.kind == VARDECL_PARAM);
if (param->var.type_info && !sema_resolve_type_info(context, param->var.type_info)) return false;
switch (param->var.kind)
{
case VARDECL_PARAM:
case VARDECL_PARAM_EXPR:
case VARDECL_PARAM_CT:
case VARDECL_PARAM_REF:
if (param->var.type_info && !sema_resolve_type_info(context, param->var.type_info)) return false;
break;
case VARDECL_PARAM_CT_TYPE:
if (param->var.type_info)
{
SEMA_ERROR(param->var.type_info, "A compile time type parameter cannot have a type itself.");
return false;
}
break;
case VARDECL_CONST:
case VARDECL_GLOBAL:
case VARDECL_LOCAL:
case VARDECL_MEMBER:
case VARDECL_LOCAL_CT:
case VARDECL_LOCAL_CT_TYPE:
case VARDECL_ALIAS:
UNREACHABLE
}
}
return true;
}
@@ -649,35 +703,13 @@ static inline bool sema_analyse_define(Context *context, Decl *decl)
static inline bool sema_analyse_error(Context *context __unused, Decl *decl)
{
Decl **members = decl->strukt.members;
unsigned member_count = vec_size(members);
bool success = true;
unsigned error_size = 0;
context_push_scope(context);
for (unsigned i = 0; i < member_count; i++)
{
Decl *member = members[i];
success = sema_analyse_struct_member(context, member);
if (!success) continue;
unsigned alignment = type_abi_alignment(member->type);
unsigned size = type_size(member->type);
if (error_size % alignment != 0)
{
error_size += alignment - (error_size % alignment);
}
error_size += size;
}
context_pop_scope(context);
if (!success) return false;
sema_set_struct_size(decl);
if (!sema_analyse_struct_union(context, decl)) return false;
if (decl->strukt.size > type_size(type_usize))
{
SEMA_ERROR(decl, "Error type may not exceed pointer size (%d bytes) it was %d bytes.", type_size(type_usize), error_size);
SEMA_ERROR(decl, "Error type may not exceed pointer size (%d bytes) it was %d bytes.", type_size(type_usize), decl->strukt.size);
return false;
}
decl->strukt.abi_alignment = type_abi_alignment(type_voidptr);
decl->strukt.size = type_size(type_error);
return success;
return true;
}
@@ -698,13 +730,8 @@ bool sema_analyse_decl(Context *context, Decl *decl)
switch (decl->decl_kind)
{
case DECL_STRUCT:
if (!sema_analyse_struct_union(context, decl)) return decl_poison(decl);
sema_set_struct_size(decl);
decl_set_external_name(decl);
break;
case DECL_UNION:
if (!sema_analyse_struct_union(context, decl)) return decl_poison(decl);
sema_set_union_size(decl);
decl_set_external_name(decl);
break;
case DECL_FUNC:

View File

@@ -16,6 +16,7 @@ static Ast *ast_copy_from_macro(Context *context, Ast *source);
static Ast **ast_copy_list_from_macro(Context *context, Ast **to_copy);
static Decl *decl_copy_local_from_macro(Context *context, Decl *to_copy);
static TypeInfo *type_info_copy_from_macro(Context *context, TypeInfo *source);
static inline bool sema_cast_rvalue(Context *context, Type *to, Expr *expr);
#define MACRO_COPY_DECL(x) x = decl_copy_local_from_macro(context, x)
#define MACRO_COPY_EXPR(x) x = expr_copy_from_macro(context, x)
@@ -25,7 +26,6 @@ static TypeInfo *type_info_copy_from_macro(Context *context, TypeInfo *source);
#define MACRO_COPY_AST_LIST(x) x = ast_copy_list_from_macro(context, x)
#define MACRO_COPY_AST(x) x = ast_copy_from_macro(context, x)
bool sema_analyse_expr_may_be_function(Context *context, Expr *expr);
static inline bool sema_expr_analyse_binary(Context *context, Type *to, Expr *expr);
static inline bool sema_analyse_expr_value(Context *context, Type *to, Expr *expr);
static inline bool expr_const_int_valid(Expr *expr, Type *type)
@@ -114,6 +114,7 @@ static bool expr_is_ltype(Expr *expr)
case VARDECL_LOCAL:
case VARDECL_GLOBAL:
case VARDECL_PARAM:
case VARDECL_PARAM_REF:
return true;
case VARDECL_CONST:
default:
@@ -190,13 +191,15 @@ static inline bool sema_cast_ident_rvalue(Context *context, Type *to, Expr *expr
case VARDECL_CONST:
expr_replace(expr, expr_copy_from_macro(context, decl->var.init_expr));
return sema_analyse_expr(context, to, expr);
case VARDECL_PARAM_REF:
TODO
case VARDECL_PARAM_EXPR:
expr_replace(expr, expr_copy_from_macro(context, decl->var.init_expr));
return sema_analyse_expr(context, to, expr);
assert(decl->var.init_expr->resolve_status == RESOLVE_DONE);
return true;
case VARDECL_PARAM_CT_TYPE:
TODO
case VARDECL_PARAM_REF:
expr_replace(expr, expr_copy_from_macro(context, decl->var.init_expr));
return sema_cast_rvalue(context, to, expr);
case VARDECL_PARAM:
case VARDECL_GLOBAL:
case VARDECL_LOCAL:
@@ -633,6 +636,41 @@ static inline bool sema_expr_analyse_ct_identifier(Context *context, Type *to __
return true;
}
/**
 * Analyse a hash identifier expression (#foo, used for macro expression
 * parameters): resolve the symbol, then substitute a fresh copy of its
 * stored initializer expression and analyse that copy.
 *
 * @param context the semantic analysis context.
 * @param to the target type (unused here; forwarded to the expression analysis).
 * @param expr the EXPR_HASH_IDENT expression to resolve; replaced in place.
 * @return true on success, false (with expr poisoned or an error emitted) otherwise.
 */
static inline bool sema_expr_analyse_hash_identifier(Context *context, Type *to __unused, Expr *expr)
{
	Decl *ambiguous_decl = NULL;
	Decl *private_symbol = NULL;
	expr->pure = true;
	DEBUG_LOG("Now resolving %s", expr->hash_ident_expr.identifier);
	Decl *decl = sema_resolve_symbol(context,
	                                 expr->hash_ident_expr.identifier,
	                                 NULL,
	                                 &ambiguous_decl,
	                                 &private_symbol);
	// Hash idents are macro-local, so resolution can never be ambiguous or private.
	assert(!ambiguous_decl && !private_symbol);
	if (!decl)
	{
		// Fix: report the hash identifier that was looked up. The original read
		// expr->ct_ident_expr.identifier, the wrong union member for EXPR_HASH_IDENT.
		SEMA_ERROR(expr, "Compile time variable '%s' could not be found.", expr->hash_ident_expr.identifier);
		return false;
	}
	// Already handled
	if (!decl_ok(decl))
	{
		return expr_poison(expr);
	}
	DEBUG_LOG("Resolution successful of %s.", decl->name);
	assert(decl->decl_kind == DECL_VAR);
	assert(decl->resolve_status == RESOLVE_DONE);
	assert(decl->var.init_expr->resolve_status == RESOLVE_DONE);
	// Substitute a copy so repeated uses of #foo each get a fresh expression tree.
	expr_replace(expr, expr_copy_from_macro(context, decl->var.init_expr));
	return sema_analyse_expr(context, to, expr);
}
static inline bool sema_expr_analyse_binary_sub_expr(Context *context, Type *to, Expr *left, Expr *right)
{
return sema_analyse_expr(context, to, left) & sema_analyse_expr(context, to, right);
@@ -861,6 +899,7 @@ static bool sema_check_stmt_compile_time(Context *context, Ast *ast)
return false;
}
}
static inline bool sema_expr_analyse_macro_call(Context *context, Type *to, Expr *call_expr, Decl *decl)
{
// TODO failable
@@ -917,7 +956,12 @@ static inline bool sema_expr_analyse_macro_call(Context *context, Type *to, Expr
break;
case VARDECL_PARAM_EXPR:
// #foo
param->var.init_expr = arg;
// We push a scope here as this will prevent the expression from modifying
// compile time variables during evaluation:
context_push_scope(context);
bool ok = sema_analyse_expr_of_required_type(context, param->type, arg, false);
context_pop_scope(context);
if (!ok) return false;
break;
case VARDECL_PARAM_CT:
// $foo
@@ -929,7 +973,15 @@ static inline bool sema_expr_analyse_macro_call(Context *context, Type *to, Expr
}
break;
case VARDECL_PARAM_CT_TYPE:
TODO
// $Foo
if (!sema_analyse_expr_value(context, NULL, arg)) return false;
// TODO check typeof
if (arg->expr_kind != EXPR_TYPEINFO)
{
SEMA_ERROR(arg, "A type, like 'int' or 'double' was expected for the parameter '%s'.", param->name);
return false;
}
break;
case VARDECL_CONST:
case VARDECL_GLOBAL:
case VARDECL_LOCAL:
@@ -939,16 +991,13 @@ static inline bool sema_expr_analyse_macro_call(Context *context, Type *to, Expr
case VARDECL_ALIAS:
UNREACHABLE
}
if (param->var.kind != VARDECL_PARAM_EXPR)
if (param->type)
{
if (param->type)
{
if (!cast_implicit(context, arg, param->type)) return false;
}
else
{
param->type = arg->type;
}
if (!cast_implicit(context, arg, param->type)) return false;
}
else
{
param->type = arg->type;
}
param->var.init_expr = arg;
param->resolve_status = RESOLVE_DONE;
@@ -1018,7 +1067,7 @@ static inline bool sema_expr_analyse_macro_call(Context *context, Type *to, Expr
}
call_expr->expr_kind = EXPR_MACRO_BLOCK;
call_expr->macro_block.stmts = body->compound_stmt.stmts;
call_expr->macro_block.params = func_params;
call_expr->macro_block.params = params;
call_expr->macro_block.args = args;
EXIT:
context_pop_scope(context);
@@ -2221,7 +2270,7 @@ static inline bool sema_expr_analyse_ct_identifier_lvalue(Context *context, Expr
if ((intptr_t)decl->var.scope < (intptr_t)context->current_scope)
{
SEMA_ERROR(expr, "Cannot modify '%s' inside of a deeper scope.", decl->name);
SEMA_ERROR(expr, "Cannot modify '%s' inside of a runtime scope.", decl->name);
return false;
}
expr->ct_ident_expr.decl = decl;
@@ -3156,15 +3205,15 @@ static void cast_to_max_bit_size(Context *context, Expr *left, Expr *right, Type
if (bit_size_left < bit_size_right)
{
Type *to = left->type->type_kind < TYPE_U8
? type_signed_int_by_bitsize(bit_size_right)
: type_unsigned_int_by_bitsize(bit_size_right);
? type_int_signed_by_bitsize(bit_size_right)
: type_int_unsigned_by_bitsize(bit_size_right);
bool success = cast_implicit(context, left, to);
assert(success);
return;
}
Type *to = right->type->type_kind < TYPE_U8
? type_signed_int_by_bitsize(bit_size_right)
: type_unsigned_int_by_bitsize(bit_size_right);
? type_int_signed_by_bitsize(bit_size_right)
: type_int_unsigned_by_bitsize(bit_size_right);
bool success = cast_implicit(context, right, to);
assert(success);
}
@@ -3244,6 +3293,10 @@ static bool sema_expr_analyse_comp(Context *context, Expr *expr, Expr *left, Exp
return false;
}
break;
case TYPE_COMPLEX:
TODO
case TYPE_VECTOR:
TODO
}
}
@@ -3561,17 +3614,12 @@ static bool sema_expr_analyse_not(Context *context, Type *to, Expr *expr, Expr *
case TYPE_POINTER:
case TYPE_VARARRAY:
case TYPE_SUBARRAY:
case TYPE_COMPLEX:
case TYPE_BOOL:
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
case TYPE_F32:
case TYPE_F64:
case TYPE_VECTOR:
case ALL_REAL_FLOATS:
case ALL_UNSIGNED_INTS:
case ALL_SIGNED_INTS:
return true;
case TYPE_STRUCT:
case TYPE_UNION:
@@ -3628,6 +3676,10 @@ static inline bool sema_expr_analyse_ct_incdec(Context *context, Expr *expr, Exp
return true;
}
/**
* Analyse foo++ foo-- --foo ++foo
* @return false if analysis fails.
*/
static inline bool sema_expr_analyse_incdec(Context *context, Expr *expr, Expr *inner)
{
expr->constant = false;
@@ -3938,6 +3990,7 @@ static Expr *expr_copy_from_macro(Context *context, Expr *source_expr)
case EXPR_MACRO_IDENTIFIER:
case EXPR_CT_IDENT:
case EXPR_MACRO_CT_IDENTIFIER:
case EXPR_HASH_IDENT:
// TODO
return expr;
case EXPR_TYPEINFO:
@@ -4078,6 +4131,7 @@ static TypeInfo** type_info_copy_list_from_macro(Context *context, TypeInfo **to
static Ast *ast_copy_from_macro(Context *context, Ast *source)
{
if (!source) return NULL;
Ast *ast = ast_shallow_copy(source);
switch (source->ast_kind)
{
@@ -4387,6 +4441,8 @@ static inline bool sema_analyse_expr_dispatch(Context *context, Type *to, Expr *
case EXPR_ENUM_CONSTANT:
case EXPR_MEMBER_ACCESS:
UNREACHABLE
case EXPR_HASH_IDENT:
return sema_expr_analyse_hash_identifier(context, to, expr);
case EXPR_MACRO_CT_IDENTIFIER:
case EXPR_CT_IDENT:
return sema_expr_analyse_ct_identifier(context, to, expr);
@@ -4482,7 +4538,7 @@ bool sema_analyse_expr_of_required_type(Context *context, Type *to, Expr *expr,
static inline bool sema_cast_ct_ident_rvalue(Context *context, Type *to, Expr *expr)
{
Decl *decl = expr->ct_ident_expr.decl;
Expr *copy = MACRO_COPY_EXPR(decl->var.init_expr);
Expr *copy = expr_copy_from_macro(context, decl->var.init_expr);
if (!sema_analyse_expr(context, to, copy)) return false;
expr_replace(expr, copy);
return true;
@@ -4554,20 +4610,3 @@ bool sema_analyse_expr(Context *context, Type *to, Expr *expr)
return sema_analyse_expr_value(context, to, expr) && sema_cast_rvalue(context, to, expr);
}
/**
 * Analyse an expression where a plain function reference is permitted
 * (no rvalue cast is applied afterwards).
 *
 * Guards against re-entrant resolution: an expression already being
 * resolved is a cycle and is poisoned with an error.
 *
 * @return true if analysis succeeded, false if the expression was poisoned.
 */
bool sema_analyse_expr_may_be_function(Context *context, Expr *expr)
{
	// Fast paths: finished expressions keep their prior verdict,
	// and re-entry means a resolution cycle.
	if (expr->resolve_status == RESOLVE_DONE) return expr_ok(expr);
	if (expr->resolve_status == RESOLVE_RUNNING)
	{
		SEMA_ERROR(expr, "Recursive resolution of expression");
		return expr_poison(expr);
	}
	// Mark as in-flight so nested analysis of this same node is detected.
	if (expr->resolve_status == RESOLVE_NOT_DONE) expr->resolve_status = RESOLVE_RUNNING;
	if (!sema_analyse_expr_dispatch(context, NULL, expr)) return expr_poison(expr);
	expr->resolve_status = RESOLVE_DONE;
	return true;
}

View File

@@ -134,6 +134,10 @@ void sema_analysis_pass_decls(Context *context)
{
sema_analyse_decl(context, context->types[i]);
}
VECEACH(context->macros, i)
{
sema_analyse_decl(context, context->macros[i]);
}
VECEACH(context->methods, i)
{
sema_analyse_decl(context, context->methods[i]);

View File

@@ -1591,7 +1591,6 @@ bool sema_analyse_function_body(Context *context, Decl *func)
context->expected_block_type = NULL;
context->last_local = &context->locals[0];
context->in_volatile_section = 0;
context->in_macro = 0;
context->macro_counter = 0;
context->macro_nesting = 0;
context->continue_target = 0;

View File

@@ -123,8 +123,16 @@ static bool sema_resolve_type_identifier(Context *context, TypeInfo *type_info)
return true;
case DECL_POISONED:
return type_info_poison(type_info);
case DECL_FUNC:
case DECL_VAR:
if (decl->var.kind == VARDECL_PARAM_CT_TYPE || decl->var.kind == VARDECL_LOCAL_CT_TYPE)
{
assert(decl->var.init_expr->expr_kind == EXPR_TYPEINFO);
assert(decl->var.init_expr->resolve_status == RESOLVE_DONE);
*type_info = *decl->var.init_expr->type_expr;
return true;
}
FALLTHROUGH;
case DECL_FUNC:
case DECL_ENUM_CONSTANT:
case DECL_ARRAY_VALUE:
case DECL_IMPORT:

View File

@@ -39,6 +39,7 @@ const char *attribute_list[NUMBER_OF_ATTRIBUTES];
const char *kw_main;
const char *kw_sizeof;
const char *kw_alignof;
const char *kw_align;
const char *kw_offsetof;
const char *kw_nameof;
const char *kw_qnameof;
@@ -86,12 +87,13 @@ void symtab_init(uint32_t capacity)
kw_qnameof = KW_DEF("qnameof");
kw_kindof = KW_DEF("kindof");
kw_len = KW_DEF("len");
kw_align = KW_DEF("align");
kw_ordinal = KW_DEF("ordinal");
attribute_list[ATTRIBUTE_INLINE] = KW_DEF("inline");
attribute_list[ATTRIBUTE_NOINLINE] = KW_DEF("noinline");
attribute_list[ATTRIBUTE_STDCALL] = KW_DEF("stdcall");
attribute_list[ATTRIBUTE_NORETURN] = KW_DEF("noreturn");
attribute_list[ATTRIBUTE_ALIGN] = KW_DEF("align");
attribute_list[ATTRIBUTE_ALIGN] = kw_align;
attribute_list[ATTRIBUTE_PACKED] = KW_DEF("packed");
attribute_list[ATTRIBUTE_SECTION] = KW_DEF("section");
attribute_list[ATTRIBUTE_CNAME] = KW_DEF("cname");

File diff suppressed because it is too large Load Diff

View File

@@ -70,6 +70,7 @@ typedef enum
CTYPE_LONG,
CTYPE_LONG_LONG
} CType;
typedef enum
{
OS_TYPE_UNKNOWN,
@@ -111,6 +112,42 @@ typedef enum
OS_TYPE_LAST = OS_TYPE_EMSCRIPTEN
} OsType;
typedef enum
{
ENV_TYPE_UNKNOWN,
ENV_TYPE_GNU,
ENV_TYPE_GNUABIN32,
ENV_TYPE_GNUABI64,
ENV_TYPE_GNUEABI,
ENV_TYPE_GNUEABIHF,
ENV_TYPE_GNUX32,
ENV_TYPE_CODE16,
ENV_TYPE_EABI,
ENV_TYPE_EABIHF,
ENV_TYPE_ELFV1,
ENV_TYPE_ELFV2,
ENV_TYPE_ANDROID,
ENV_TYPE_MUSL,
ENV_TYPE_MUSLEABI,
ENV_TYPE_MUSLEABIHF,
ENV_TYPE_MSVC,
ENV_TYPE_ITANIUM,
ENV_TYPE_CYGNUS,
ENV_TYPE_CORECLR,
ENV_TYPE_SIMULATOR,
ENV_TYPE_MACABI,
ENV_TYPE_LAST = ENV_TYPE_MACABI
} EnvironmentType;
typedef enum
{
OBJ_FORMAT_COFF,
OBJ_FORMAT_ELF,
OBJ_FORMAT_MACHO,
OBJ_FORMAT_WASM,
OBJ_FORMAT_XCOFF
} ObjectFormatType;
typedef enum
{
VENDOR_UNKNOWN,
@@ -133,6 +170,50 @@ typedef enum
VENDOR_LAST = VENDOR_OPEN_EMBEDDED
} VendorType;
typedef enum
{
ABI_UNKNOWN,
ABI_X64,
ABI_WIN64,
ABI_X86,
ABI_AARCH64,
ABI_WASM,
ABI_ARM,
ABI_PPC32,
ABI_PPC64_SVR4,
ABI_RISCV,
} ABI;
typedef enum
{
FLOAT_ABI_NONE,
FLOAT_ABI_SOFT,
FLOAT_ABI_HARD,
} FloatABI;
typedef enum
{
AVX_NONE,
AVX,
AVX_512,
} AVXLevel;
typedef enum
{
ARM_AAPCS,
ARM_AAPCS16,
ARM_APCS_GNU,
ARM_AAPCS_LINUX,
} ARMVariant;
typedef enum
{
ARM_ABI_AAPCS,
ARM_ABI_APCS,
ARM_ABI_AAPCS16_VFP,
ARM_ABI_AAPCS_VFP,
} ARMABIVariant;
typedef struct
{
@@ -145,17 +226,87 @@ typedef struct
const char *os_name;
VendorType vendor;
const char *vendor_name;
EnvironmentType environment_type;
const char *environment_name;
ObjectFormatType object_format;
int alloca_address_space;
ABI abi;
FloatABI float_abi : 3;
unsigned default_number_regs : 8;
union
{
struct
{
bool is_darwin_vector_abi : 1;
bool return_small_struct_in_reg_abi : 1;
bool is_win32_float_struct_abi : 1;
bool use_soft_float : 1;
bool is_win_api : 1;
bool is_mcu_api : 1;
} x86;
struct
{
AVXLevel avx_level : 3;
bool is_win64 : 1;
bool is_mingw64 : 1;
bool pass_int128_vector_in_mem : 1;
} x64;
struct
{
bool is_32_bit : 1;
} mips;
struct
{
bool is_aapcs : 1;
bool is_darwin_pcs : 1;
} aarch64;
struct
{
bool is_darwin : 1;
bool is_win32 : 1;
} aarch;
struct
{
bool is_win32 : 1;
ARMVariant variant : 3;
ARMABIVariant abi_variant : 3;
} arm;
struct
{
bool is_softfp : 1;
} ppc;
struct
{
bool is_softfp : 1;
bool is_elfv2 : 1;
bool has_qpx : 1;
} ppc64;
struct
{
unsigned xlen;
unsigned abiflen;
} riscv;
struct
{
bool has_vector : 1;
} systemz;
};
bool little_endian;
bool tls_supported;
bool asm_supported;
bool float_128;
bool float_16;
bool vec_128i;
bool vec_64i;
bool vec_128f;
bool vec_64f;
bool int_128;
unsigned align_pref_pointer;
unsigned align_pref_byte;
unsigned align_pref_short;
unsigned align_pref_int;
unsigned align_pref_long;
unsigned align_pref_i128;
unsigned align_pref_half;
unsigned align_pref_float;
unsigned align_pref_double;
@@ -165,6 +316,7 @@ typedef struct
unsigned align_short;
unsigned align_int;
unsigned align_long;
unsigned align_i128;
unsigned align_half;
unsigned align_float;
unsigned align_double;
@@ -191,7 +343,9 @@ typedef struct
unsigned sse_reg_param_max;
unsigned builtin_ms_valist;
unsigned aarch64sve_types;
unsigned max_size_for_return;
char *platform_name;
} Target;
extern Target build_target;

View File

@@ -4,9 +4,9 @@
#include "compiler_internal.h"
static Type t_u0, t_str, t_u1, t_i8, t_i16, t_i32, t_i64, t_ixx;
static Type t_u8, t_u16, t_u32, t_u64;
static Type t_f32, t_f64, t_fxx;
static Type t_u0, t_str, t_u1, t_i8, t_i16, t_i32, t_i64, t_i128, t_ixx;
static Type t_u8, t_u16, t_u32, t_u64, t_u128;
static Type t_f16, t_f32, t_f64, t_f128, t_fxx;
static Type t_usz, t_isz;
static Type t_cus, t_cui, t_cul, t_cull;
static Type t_cs, t_ci, t_cl, t_cll;
@@ -16,19 +16,23 @@ Type *type_bool = &t_u1;
Type *type_void = &t_u0;
Type *type_string = &t_str;
Type *type_voidptr = &t_voidstar;
Type *type_half = &t_f16;
Type *type_float = &t_f32;
Type *type_double = &t_f64;
Type *type_quad = &t_f128;
Type *type_typeid = &t_typeid;
Type *type_typeinfo = &t_typeinfo;
Type *type_char = &t_i8;
Type *type_short = &t_i16;
Type *type_int = &t_i32;
Type *type_long = &t_i64;
Type *type_i128 = &t_i128;
Type *type_isize = &t_isz;
Type *type_byte = &t_u8;
Type *type_ushort = &t_u16;
Type *type_uint = &t_u32;
Type *type_ulong = &t_u64;
Type *type_u128 = &t_u128;
Type *type_usize = &t_usz;
Type *type_compint = &t_ixx;
Type *type_compfloat = &t_fxx;
@@ -52,7 +56,7 @@ unsigned alignment_error_code;
#define VAR_ARRAY_OFFSET 2
#define ARRAY_OFFSET 3
Type *type_signed_int_by_bitsize(unsigned bytesize)
Type *type_int_signed_by_bitsize(unsigned bytesize)
{
switch (bytesize)
{
@@ -60,10 +64,11 @@ Type *type_signed_int_by_bitsize(unsigned bytesize)
case 16: return type_short;
case 32: return type_int;
case 64: return type_long;
case 128: return type_i128;
default: FATAL_ERROR("Illegal bitsize %d", bytesize);
}
}
Type *type_unsigned_int_by_bitsize(unsigned bytesize)
Type *type_int_unsigned_by_bitsize(unsigned bytesize)
{
switch (bytesize)
{
@@ -71,6 +76,7 @@ Type *type_unsigned_int_by_bitsize(unsigned bytesize)
case 16: return type_ushort;
case 32: return type_uint;
case 64: return type_ulong;
case 128: return type_u128;
default: FATAL_ERROR("Illegal bitsize %d", bytesize);
}
}
@@ -87,18 +93,8 @@ const char *type_to_error_string(Type *type)
case TYPE_STRUCT:
case TYPE_VOID:
case TYPE_BOOL:
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_IXX:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
case TYPE_F32:
case TYPE_F64:
case TYPE_FXX:
case ALL_INTS:
case ALL_FLOATS:
case TYPE_UNION:
case TYPE_ERRTYPE:
return type->name;
@@ -114,6 +110,9 @@ const char *type_to_error_string(Type *type)
return strcat_arena(buffer, ")");
}
case TYPE_COMPLEX:
case TYPE_VECTOR:
TODO
case TYPE_MEMBER:
return "member";
case TYPE_TYPEINFO:
@@ -174,6 +173,10 @@ size_t type_size(Type *type)
{
switch (type->type_kind)
{
case TYPE_VECTOR:
return type_size(type->vector.base) * type->vector.len;
case TYPE_COMPLEX:
return type_size(type->complex) * 2;
case TYPE_POISONED:
case TYPE_TYPEINFO:
case TYPE_MEMBER:
@@ -215,15 +218,389 @@ const char *type_generate_qname(Type *type)
return strformat("%s::%s", type->decl->module->name->module, type->name);
}
// True when the canonical form of `type` is a struct or a union.
bool type_is_union_struct(Type *type)
{
	switch (type->canonical->type_kind)
	{
		case TYPE_STRUCT:
		case TYPE_UNION:
			return true;
		default:
			return false;
	}
}
/**
 * True when a field of this type contributes nothing to a record's layout:
 * an empty struct/union, or (when allow_array is set) any array chain that
 * is zero-length or ends in such an empty aggregate.
 */
bool type_is_empty_field(Type *type, bool allow_array)
{
	Type *current = type->canonical;
	if (allow_array)
	{
		// Walk through nested arrays; a zero-length array is empty by itself.
		for (; current->type_kind == TYPE_ARRAY; current = current->array.base->canonical)
		{
			if (current->array.len == 0) return true;
		}
	}
	if (!type_is_union_struct(current)) return false;
	return type_is_empty_union_struct(current, allow_array);
}
/**
 * True when `type` is a struct/union all of whose members are themselves
 * empty fields (see type_is_empty_field). Non-aggregates are never empty.
 */
bool type_is_empty_union_struct(Type *type, bool allow_array)
{
	if (!type_is_union_struct(type)) return false;
	Decl **members = type->decl->strukt.members;
	unsigned member_count = vec_size(members);
	for (unsigned idx = 0; idx < member_count; idx++)
	{
		if (!type_is_empty_field(members[idx]->type, allow_array)) return false;
	}
	return true;
}
// True when the canonical type is a 128-bit integer (signed or unsigned).
bool type_is_int128(Type *type)
{
	switch (type->canonical->type_kind)
	{
		case TYPE_I128:
		case TYPE_U128:
			return true;
		default:
			return false;
	}
}
/**
 * If a struct/union effectively wraps exactly one scalar-ish element
 * (ignoring empty fields and unwrapping length-1 arrays and nested
 * single-element aggregates), return that element's type.
 * Returns NULL when there are zero or multiple real elements, or when
 * the candidate does not cover the aggregate's full size (padding).
 * Used by ABI classification (single-element structs may be passed as
 * their element).
 */
Type *type_find_single_struct_element(Type *type)
{
	if (!type_is_union_struct(type)) return NULL;
	Type *found = NULL;
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		// Ignore empty arrays
		if (type_is_empty_field(members[i]->type, true)) continue;
		// Already one element found, not single element.
		if (found) return NULL;
		Type *field_type = members[i]->type->canonical;
		// Unwrap any chain of length-1 arrays: T[1][1] counts as T.
		while (field_type->type_kind == TYPE_ARRAY)
		{
			if (field_type->array.len != 1) break;
			field_type = field_type->array.base;
		}
		// Recurse into nested aggregates; they must also be single-element.
		if (type_is_union_struct(field_type))
		{
			field_type = type_find_single_struct_element(field_type);
			if (!field_type) return NULL;
		}
		found = field_type;
	}
	// If there is some padding? Then ignore.
	if (found && type_size(type) != type_size(found)) found = NULL;
	return found;
}
/**
 * True when `type` is a vector eligible for the PPC64 QPX unit:
 * only relevant on the PPC64 SVR4 ABI with QPX enabled, and only for
 * multi-element float/double vectors.
 */
static bool type_is_qpx_vector(Type *type)
{
	if (build_target.abi != ABI_PPC64_SVR4 || !build_target.ppc64.has_qpx) return false;
	type = type->canonical;
	if (type->type_kind != TYPE_VECTOR) return false;
	// A single-element vector is treated as a scalar, not a QPX vector.
	if (type->vector.len == 1) return false;
	switch (type->vector.base->type_kind)
	{
		case TYPE_F64:
			// NOTE(review): double vectors require >= 256 bits while float
			// vectors require <= 128 bits — asymmetric on purpose? Verify
			// against the QPX classification in the reference ABI lowering.
			return type_size(type) >= 256 / 8;
		case TYPE_F32:
			return type_size(type) <= 128 / 8;
		default:
			return false;
	}
}
/**
 * True when the type is treated as an aggregate for ABI purposes
 * (passed/returned via memory or field-by-field classification) rather
 * than as a scalar. Typedefs delegate to their canonical type.
 */
bool type_is_abi_aggregate(Type *type)
{
	switch (type->type_kind)
	{
		case TYPE_POISONED:
			return false;
		case TYPE_TYPEDEF:
			return type_is_abi_aggregate(type->canonical);
		// Scalars and scalar-like types: never aggregates.
		case ALL_FLOATS:
		case TYPE_VOID:
		case ALL_INTS:
		case TYPE_BOOL:
		case TYPE_VARARRAY:
		case TYPE_TYPEID:
		case TYPE_POINTER:
		case TYPE_ENUM:
		case TYPE_FUNC:
		case TYPE_STRING:
		case TYPE_VECTOR:
			return false;
		// Record-like types, arrays, and multi-word composites: aggregates.
		case TYPE_ERRTYPE:
		case TYPE_STRUCT:
		case TYPE_UNION:
		case TYPE_SUBARRAY:
		case TYPE_ARRAY:
		case TYPE_ERR_UNION:
		case TYPE_COMPLEX:
			return true;
		// Compile-time-only types never reach ABI lowering.
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
			UNREACHABLE
	}
	UNREACHABLE
}
/**
 * True when `type` may serve as the base element of a homogeneous
 * aggregate (HFA/HVA-style classification) for the current target ABI.
 * Which floats/vectors qualify is ABI-specific; unknown ABIs reject all.
 */
bool type_is_homogenous_base_type(Type *type)
{
	type = type->canonical;
	switch (build_target.abi)
	{
		case ABI_PPC64_SVR4:
			switch (type->type_kind)
			{
				case TYPE_F128:
					// f128 only qualifies when the target actually supports it.
					if (!build_target.float_128) return false;
					FALLTHROUGH;
				case TYPE_F32:
				case TYPE_F64:
					// Hard-float only; soft-float PPC64 has no FP registers to fill.
					return !build_target.ppc64.is_softfp;
				case TYPE_VECTOR:
					return type_size(type) == 128 / 8 || type_is_qpx_vector(type);
				default:
					return false;
			}
		case ABI_X64:
		case ABI_WIN64:
		case ABI_X86:
			switch (type->type_kind)
			{
				case TYPE_F64:
				case TYPE_F32:
					return true;
				case TYPE_VECTOR:
					switch (type_size(type))
					{
						case 16:
						case 32:
						case 64:
							// vec128 256 512 ok
							return true;
						default:
							return false;
					}
				default:
					return false;
			}
		case ABI_AARCH64:
			switch (type->type_kind)
			{
				case ALL_FLOATS:
					return true;
				case TYPE_VECTOR:
					switch (type_size(type))
					{
						case 8:
						case 16:
							// vector 64, 128 => true
							return true;
						default:
							return false;
					}
				default:
					return false;
			}
		case ABI_ARM:
			switch (type->type_kind)
			{
				case TYPE_F32:
				case TYPE_F64:
				case TYPE_F128:
					return true;
				case TYPE_VECTOR:
					// NOTE(review): unlike the x64/aarch64 cases above, this inner
					// switch has no default, so a vector of another size falls
					// through into `default: return false;` below. Functionally
					// correct, but an explicit return would be clearer — confirm
					// the fallthrough is intentional.
					switch (type_size(type))
					{
						case 8:
						case 16:
							return true;
					}
				default:
					return false;
			}
		case ABI_UNKNOWN:
		case ABI_WASM:
		case ABI_PPC32:
		case ABI_RISCV:
			return false;
	}
	UNREACHABLE
}
/**
 * True when a homogeneous aggregate of `members` elements of base type
 * `type` is small enough to be passed in registers on the current ABI.
 */
bool type_homogenous_aggregate_small_enough(Type *type, unsigned members)
{
	switch (build_target.abi)
	{
		case ABI_X64:
		case ABI_WIN64:
		case ABI_X86:
		case ABI_AARCH64:
		case ABI_ARM:
			// These ABIs cap homogeneous aggregates at four elements.
			return members <= 4;
		case ABI_PPC64_SVR4:
		{
			// f128 (when supported) and vectors occupy one register each.
			if (build_target.float_128 && type->type_kind == TYPE_F128) return members <= 8;
			if (type->type_kind == TYPE_VECTOR) return members <= 8;
			// Otherwise each element takes ceil(size / 8) register-sized chunks.
			size_t chunks_per_member = (type_size(type) + 7) / 8;
			return chunks_per_member * members <= 8;
		}
		case ABI_UNKNOWN:
		case ABI_WASM:
		case ABI_PPC32:
		case ABI_RISCV:
			return false;
	}
	UNREACHABLE
}
/**
 * Classify `type` as a homogeneous aggregate: a composite whose flattened
 * members all share one base type. On success, *base is the base type and
 * *elements the element count; the final verdict also requires the count
 * to fit the ABI limit (type_homogenous_aggregate_small_enough).
 *
 * `*base` may already be set from an outer/member classification and is
 * then required to match.
 */
bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
{
	*elements = 0;
	switch (type->type_kind)
	{
		case TYPE_COMPLEX:
			// A complex is treated as two elements of its component type.
			// NOTE(review): the `break` then falls through to `*elements = 1;`
			// below, overwriting the 2 — confirm the complex path is finished.
			*base = type->complex;
			*elements = 2;
			break;
		// Compile-time-only, void and pointer-sized-but-opaque types never
		// form homogeneous aggregates.
		case TYPE_FXX:
		case TYPE_POISONED:
		case TYPE_IXX:
		case TYPE_VOID:
		case TYPE_TYPEINFO:
		case TYPE_MEMBER:
		case TYPE_TYPEID:
		case TYPE_FUNC:
		case TYPE_STRING:
		case TYPE_SUBARRAY:
			return false;
		case TYPE_ERR_UNION:
			// Error union lowers to two pointer-sized words.
			*base = type_usize->canonical;
			*elements = 2;
			return true;
		case TYPE_ERRTYPE:
			// Error type lowers to one pointer-sized word.
			*base = type_usize->canonical;
			*elements = 1;
			return true;
		case TYPE_TYPEDEF:
			return type_is_homogenous_aggregate(type->canonical, base, elements);
		case TYPE_STRUCT:
		case TYPE_UNION:
			*elements = 0;
			{
				Decl **members = type->decl->strukt.members;
				VECEACH(members, i)
				{
					// Flatten member arrays: T[n] contributes n copies of T.
					unsigned member_mult = 1;
					Type *member_type = members[i]->type->canonical;
					while (member_type->type_kind == TYPE_ARRAY)
					{
						if (member_type->array.len == 0) return false;
						member_mult *= member_type->array.len;
						member_type = member_type->array.base;
					}
					unsigned member_members = 0;
					// Empty fields contribute nothing and are skipped.
					if (type_is_empty_field(member_type, true)) continue;
					if (!type_is_homogenous_aggregate(member_type, base, &member_members)) return false;
					member_members *= member_mult;
					// Unions overlap (take the max); structs accumulate.
					if (type->type_kind == TYPE_UNION)
					{
						*elements = MAX(*elements, member_members);
					}
					else
					{
						*elements += member_members;
					}
				}
				// NOTE(review): this asserts the out-pointer itself, which is
				// always non-null — `assert(*base)` looks like the intent.
				assert(base);
				// Ensure no padding
				if (type_size(*base) * *elements != type_size(type)) return false;
			}
			goto TYPECHECK;
		case TYPE_ARRAY:
			if (type->array.len == 0) return false;
			if (!type_is_homogenous_aggregate(type->array.base, base, elements)) return false;
			*elements *= type->array.len;
			goto TYPECHECK;
		case TYPE_ENUM:
			// Lower enum to underlying type
			type = type->decl->enums.type_info->type;
			break;
		case TYPE_BOOL:
			// Lower bool to unsigned char
			type = type_byte;
			break;
		case ALL_SIGNED_INTS:
			// Lower signed to unsigned
			type = type_int_unsigned_by_bitsize(type->builtin.bytesize);
			break;
		case ALL_UNSIGNED_INTS:
		case ALL_REAL_FLOATS:
		case TYPE_VECTOR:
			break;
		case TYPE_POINTER:
		case TYPE_VARARRAY:
			// All pointers are the same.
			type = type_voidptr;
			break;
	}
	// Scalar leaf: exactly one element of this (lowered) type.
	*elements = 1;
	if (!type_is_homogenous_base_type(type)) return false;
	if (!*base)
	{
		*base = type;
		// Special handling of non-power-of-2 vectors
		if (type->type_kind == TYPE_VECTOR)
		{
			// Expand to actual size.
			unsigned vec_elements = type_size(type) / type_size(type->vector.base);
			*base = type_get_vector(type->vector.base, vec_elements);
		}
	}
	// One is vector - other isn't => failure
	if (((*base)->type_kind == TYPE_VECTOR) != (type->type_kind == TYPE_VECTOR)) return false;
	// Size does not match => failure
	if (type_size(*base) != type_size(type)) return false;
	TYPECHECK:
	if (*elements == 0) return false;
	return type_homogenous_aggregate_small_enough(type, *elements);
}
/**
 * Alignment to use for a stack (alloca) slot of this type.
 * Currently identical to the ABI alignment; kept as a separate entry
 * point so targets can diverge (e.g. use preferred alignment) later.
 */
unsigned int type_alloca_alignment(Type *type)
{
	return type_abi_alignment(type);
}
/**
 * Return the canonical type of the largest member of a union, or NULL
 * when the union has no members. Used to pick the representative member
 * for layout/ABI purposes.
 *
 * @param type a TYPE_UNION type (asserted).
 * @return canonical type of the largest member, NULL for an empty union.
 */
Type *type_find_largest_union_element(Type *type)
{
	assert(type->type_kind == TYPE_UNION);
	size_t largest = 0;
	Type *largest_type = NULL;
	Decl **members = type->decl->strukt.members;
	VECEACH(members, i)
	{
		// Fix: measure each member, not the union itself. The original
		// compared type_size(type) and stored type->canonical, so the
		// loop never actually inspected the members.
		size_t member_size = type_size(members[i]->type);
		if (member_size > largest)
		{
			largest = member_size;
			largest_type = members[i]->type->canonical;
		}
	}
	return largest_type;
}
unsigned int type_abi_alignment(Type *type)
{
switch (type->type_kind)
{
case TYPE_POISONED:
case TYPE_VOID:
case TYPE_TYPEINFO:
case TYPE_MEMBER:
UNREACHABLE;
case TYPE_VECTOR:
case TYPE_COMPLEX:
TODO
case TYPE_VOID:
return 1;
case TYPE_TYPEDEF:
return type_abi_alignment(type->canonical);
case TYPE_ENUM:
@@ -232,7 +609,7 @@ unsigned int type_abi_alignment(Type *type)
return alignment_error_code;
case TYPE_STRUCT:
case TYPE_UNION:
return type->decl->strukt.abi_alignment;
return type->decl->alignment;
case TYPE_TYPEID:
case TYPE_BOOL:
case ALL_INTS:
@@ -252,12 +629,11 @@ unsigned int type_abi_alignment(Type *type)
UNREACHABLE
}
static inline void create_type_cache(Type *canonical_type)
static inline void create_type_cache(Type *type)
{
assert(canonical_type->canonical == canonical_type);
for (int i = 0; i < ARRAY_OFFSET; i++)
{
vec_add(canonical_type->type_cache, NULL);
vec_add(type->type_cache, NULL);
}
}
@@ -394,40 +770,61 @@ Type *type_get_indexed_type(Type *type)
}
Type *type_create_array(Type *arr_type, uint64_t len, bool canonical)
static Type *type_create_array(Type *element_type, uint64_t len, bool vector, bool canonical)
{
if (canonical) arr_type = arr_type->canonical;
if (!arr_type->type_cache)
if (canonical) element_type = element_type->canonical;
if (!element_type->type_cache)
{
create_type_cache(arr_type);
create_type_cache(element_type);
}
int entries = (int)vec_size(arr_type->type_cache);
int entries = (int)vec_size(element_type->type_cache);
for (int i = ARRAY_OFFSET; i < entries; i++)
{
Type *ptr = arr_type->type_cache[i];
if (ptr->array.len == len)
Type *ptr_vec = element_type->type_cache[i];
if (vector)
{
return ptr;
if (ptr_vec->type_kind != TYPE_VECTOR) continue;
if (ptr_vec->vector.len == len) return ptr_vec;
}
else
{
if (ptr_vec->type_kind == TYPE_VECTOR) continue;
if (ptr_vec->array.len == len) return ptr_vec;
}
}
Type *array = type_new(TYPE_ARRAY, strformat("%s[%llu]", arr_type->name, len));
array->array.base = arr_type;
array->array.len = len;
if (arr_type->canonical == arr_type)
Type *vec_arr;
if (vector)
{
array->canonical = array;
vec_arr = type_new(TYPE_VECTOR, strformat("%s[<%llu>]", element_type->name, len));
vec_arr->vector.base = element_type;
vec_arr->vector.len = len;
}
else
{
array->canonical = type_create_array(arr_type, len, true);
vec_arr = type_new(TYPE_ARRAY, strformat("%s[%llu]", element_type->name, len));
vec_arr->array.base = element_type;
vec_arr->array.len = len;
}
VECADD(arr_type->type_cache, array);
return array;
if (element_type->canonical == element_type)
{
vec_arr->canonical = vec_arr;
}
else
{
vec_arr->canonical = type_create_array(element_type, len, vector, true);
}
VECADD(element_type->type_cache, vec_arr);
return vec_arr;
}
Type *type_get_array(Type *arr_type, uint64_t len)
{
return type_create_array(arr_type, len, false);
return type_create_array(arr_type, len, false, false);
}
/**
 * Get (or lazily create and cache) the vector type `vector_type[<len>]`
 * with the given element type and length.
 */
Type *type_get_vector(Type *vector_type, unsigned len)
{
	return type_create_array(vector_type, len, true, false);
}
static void type_create(const char *name, Type *location, TypeKind kind, unsigned bitsize,
@@ -475,11 +872,13 @@ type_create(#_name, &_shortname, _type, _bits, target->align_ ## _align, target-
DEF_TYPE(short, t_i16, TYPE_I16, 16, short);
DEF_TYPE(int, t_i32, TYPE_I32, 32, int);
DEF_TYPE(long, t_i64, TYPE_I64, 64, long);
DEF_TYPE(i128, t_i128, TYPE_I128, 128, i128);
DEF_TYPE(byte, t_u8, TYPE_U8, 8, byte);
DEF_TYPE(ushort, t_u16, TYPE_U16, 16, short);
DEF_TYPE(uint, t_u32, TYPE_U32, 32, int);
DEF_TYPE(ulong, t_u64, TYPE_U64, 64, long);
DEF_TYPE(u128, t_u128, TYPE_U128, 128, i128);
DEF_TYPE(void, t_u0, TYPE_VOID, 8, byte);
DEF_TYPE(string, t_str, TYPE_STRING, target->width_pointer, pointer);
@@ -495,18 +894,18 @@ type_create(#_name, &_shortname, _type, _bits, target->align_ ## _align, target-
type_create("compint", &t_ixx, TYPE_IXX, 32, 0, 0);
type_create("compfloat", &t_fxx, TYPE_FXX, 64, 0, 0);
type_create_alias("usize", &t_usz, type_unsigned_int_by_bitsize(target->width_pointer));
type_create_alias("isize", &t_isz, type_signed_int_by_bitsize(target->width_pointer));
type_create_alias("usize", &t_usz, type_int_unsigned_by_bitsize(target->width_pointer));
type_create_alias("isize", &t_isz, type_int_signed_by_bitsize(target->width_pointer));
type_create_alias("c_ushort", &t_cus, type_unsigned_int_by_bitsize(target->width_c_short));
type_create_alias("c_uint", &t_cui, type_unsigned_int_by_bitsize(target->width_c_int));
type_create_alias("c_ulong", &t_cul, type_unsigned_int_by_bitsize(target->width_c_long));
type_create_alias("c_ulonglong", &t_cull, type_unsigned_int_by_bitsize(target->width_c_long_long));
type_create_alias("c_ushort", &t_cus, type_int_unsigned_by_bitsize(target->width_c_short));
type_create_alias("c_uint", &t_cui, type_int_unsigned_by_bitsize(target->width_c_int));
type_create_alias("c_ulong", &t_cul, type_int_unsigned_by_bitsize(target->width_c_long));
type_create_alias("c_ulonglong", &t_cull, type_int_unsigned_by_bitsize(target->width_c_long_long));
type_create_alias("c_short", &t_cs, type_signed_int_by_bitsize(target->width_c_short));
type_create_alias("c_int", &t_ci, type_signed_int_by_bitsize(target->width_c_int));
type_create_alias("c_long", &t_cl, type_signed_int_by_bitsize(target->width_c_long));
type_create_alias("c_longlong", &t_cll, type_signed_int_by_bitsize(target->width_c_long_long));
type_create_alias("c_short", &t_cs, type_int_signed_by_bitsize(target->width_c_short));
type_create_alias("c_int", &t_ci, type_int_signed_by_bitsize(target->width_c_int));
type_create_alias("c_long", &t_cl, type_int_signed_by_bitsize(target->width_c_long));
type_create_alias("c_longlong", &t_cll, type_int_signed_by_bitsize(target->width_c_long_long));
alignment_subarray = MAX(type_abi_alignment(&t_voidstar), type_abi_alignment(t_usz.canonical));
size_subarray = alignment_subarray * 2;
@@ -562,22 +961,27 @@ typedef enum
Type *type_find_max_num_type(Type *num_type, Type *other_num)
{
if (other_num->type_kind < TYPE_I8 || other_num->type_kind > TYPE_FXX) return NULL;
assert(num_type->type_kind >= TYPE_I8 && num_type->type_kind <= TYPE_FXX);
static MaxType max_conv[TYPE_FXX - TYPE_I8 + 1][TYPE_FXX - TYPE_BOOL + 1] = {
// I8 I16 I32 I64 U8 U16 U32 U64 IXX F32 F64 FXX
{ L, R, R, R, X, X, X, X, L, R, R, FL }, // I8
{ L, L, R, R, L, X, X, X, L, R, R, FL }, // I16
{ L, L, L, R, L, L, X, X, L, R, R, FL }, // I32
{ L, L, L, L, L, L, L, X, L, R, R, FL }, // I64
{ X, R, R, R, L, R, R, R, L, R, R, FL }, // U8
{ X, X, R, R, L, L, R, R, L, R, R, FL }, // U16
{ X, X, X, R, L, L, L, R, L, R, R, FL }, // U32
{ X, X, X, X, L, L, L, L, L, R, R, FL }, // U64
{ R, R, R, R, R, R, R, R, L, R, R, R }, // IXX
{ L, L, L, L, L, L, L, L, L, L, R, L }, // F32
{ L, L, L, L, L, L, L, L, L, L, L, L }, // F64
{ FL, FL, FL, FL, FL, FL, FL, FL, FL, R, R, L }, // FXX
TypeKind kind = num_type->type_kind;
TypeKind other_kind = other_num->type_kind;
if (other_kind < TYPE_I8 || other_kind > TYPE_FXX) return NULL;
static MaxType max_conv[TYPE_FXX - TYPE_I8 + 1][TYPE_FXX - TYPE_I8 + 1] = {
// I8 I16 I32 I64 I128 U8 U16 U32 U64 U128 IXX F16 F32 F64 F128 FXX
{ L, R, R, R, R, X, X, X, X, X, L, R, R, R, R, FL }, // I8
{ L, L, R, R, R, L, X, X, X, X, L, R, R, R, R, FL }, // I16
{ L, L, L, R, R, L, L, X, X, X, L, R, R, R, R, FL }, // I32
{ L, L, L, L, R, L, L, L, X, X, L, R, R, R, R, FL }, // I64
{ L, L, L, L, L, L, L, L, X, X, L, R, R, R, R, FL }, // I128
{ X, R, R, R, R, L, R, R, R, R, L, R, R, R, R, FL }, // U8
{ X, X, R, R, R, L, L, R, R, R, L, R, R, R, R, FL }, // U16
{ X, X, X, R, R, L, L, L, R, R, L, R, R, R, R, FL }, // U32
{ X, X, X, X, R, L, L, L, L, R, L, R, R, R, R, FL }, // U64
{ X, X, X, X, X, L, L, L, L, L, L, R, R, R, R, FL }, // U128
{ R, R, R, R, R, R, R, R, R, R, L, R, R, R, R, R }, // IXX
{ L, L, L, L, L, L, L, L, L, L, L, L, R, R, R, L }, // F16
{ L, L, L, L, L, L, L, L, L, L, L, L, L, R, R, L }, // F32
{ L, L, L, L, L, L, L, L, L, L, L, L, L, L, R, L }, // F64
{ L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L }, // F128
{ FL, FL, FL, FL, FL, FL, FL, FL, FL, FL, FL, R, R, R, R, L }, // FXX
};
MaxType conversion = max_conv[num_type->type_kind - TYPE_I8][other_num->type_kind - TYPE_I8];
switch (conversion)
@@ -700,20 +1104,10 @@ Type *type_find_max_type(Type *type, Type *other)
case TYPE_TYPEINFO:
case TYPE_MEMBER:
return NULL;
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
case TYPE_IXX:
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
case ALL_INTS:
if (other->type_kind == TYPE_ENUM) return type_find_max_type(type, other->decl->enums.type_info->type->canonical);
FALLTHROUGH;
case TYPE_F32:
case TYPE_F64:
case TYPE_FXX:
case ALL_FLOATS:
return type_find_max_num_type(type, other);
case TYPE_POINTER:
return type_find_max_ptr_type(type, other);
@@ -741,6 +1135,12 @@ Type *type_find_max_type(Type *type, Type *other)
return type_find_max_vararray_type(type, other);
case TYPE_SUBARRAY:
TODO
case TYPE_VECTOR:
// No implicit conversion between vectors
return NULL;
case TYPE_COMPLEX:
// Implicit conversion or not?
TODO;
}
UNREACHABLE
}

View File

@@ -42,41 +42,7 @@ typedef enum
} SubArchType;
typedef enum
{
ENV_TYPE_UNKNOWN,
ENV_TYPE_GNU,
ENV_TYPE_GNUABIN32,
ENV_TYPE_SNUABI64,
ENV_TYPE_GNUEABI,
ENV_TYPE_GNUEABIHF,
ENV_TYPE_GNUX32,
ENV_TYPE_CODE16,
ENV_TYPE_EABI,
ENV_TYPE_EABIHF,
ENV_TYPE_ELFV1,
ENV_TYPE_ELFV2,
ENV_TYPE_ANDROID,
ENV_TYPE_MUSL,
ENV_TYPE_MUSLEABI,
ENV_TYPE_MUSLEABIHF,
ENV_TYPE_MSVC,
ENV_TYPE_ITANIUM,
ENV_TYPE_CYGNUS,
ENV_TYPE_CORECLR,
ENV_TYPE_SIMULATOR,
ENV_TYPE_MACABI,
ENV_TYPE_LAST = ENV_TYPE_MACABI
} EnvironmentType;
typedef enum
{
OBJ_FORMAT_COFF,
OBJ_FORMAT_ELF,
OBJ_FORMAT_MACHO,
OBJ_FORMAT_WASM,
OBJ_FORMAT_XCOFF
} ObjectFormatType;
typedef struct

View File

@@ -30,7 +30,7 @@ static inline bool is_power_of_two(uint64_t x)
return x != 0 && (x & (x - 1)) == 0;
}
static inline uint32_t nextHighestPowerOf2(uint32_t v)
static inline uint32_t next_highest_power_of_2(uint32_t v)
{
v--;
v |= v >> 1U;
@@ -374,3 +374,7 @@ char *strcopy(const char *start, size_t len);
typeof(_b) __b__ = (_b); \
__a__ > __b__ ? __a__ : __b__; })
#define MIN(_a, _b) ({ \
typeof(_a) __a__ = (_a); \
typeof(_b) __b__ = (_b); \
__a__ < __b__ ? __a__ : __b__; })