- Fix issue where a compile-time parameter is followed by "...".

- Fix issue with some conversions to untyped list.
- Experimental change: add `+++` `&&&` `|||` as replacements for `$concat`, `$and`, and `$or`.
This commit is contained in:
Christoffer Lerno
2024-08-04 23:16:25 +02:00
parent b49b60ab5f
commit 2748cf99b3
26 changed files with 772 additions and 131 deletions

View File

@@ -594,7 +594,7 @@ fn void* tmalloc(usz size, usz alignment = 0) @builtin @inline @nodiscard
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
* @require $Type.alignof <= DEFAULT_MEM_ALIGNMENT : "Types with alignment exceeding the default must use 'alloc_aligned' instead"
**/
macro new($Type, ...) @nodiscard
@@ -612,7 +612,7 @@ macro new($Type, ...) @nodiscard
* Allocate using an aligned allocation. This is necessary for types with a default memory alignment
* exceeding DEFAULT_MEM_ALIGNMENT. IMPORTANT! It must be freed using free_aligned.
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
**/
macro new_aligned($Type, ...) @nodiscard
{
@@ -644,7 +644,7 @@ macro alloc_aligned($Type) @nodiscard
/**
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
**/
macro temp_new($Type, ...) @nodiscard
{

View File

@@ -149,7 +149,7 @@ macro void free_aligned(Allocator allocator, void* ptr)
/**
* @require $Type.alignof <= mem::DEFAULT_MEM_ALIGNMENT : "Types with alignment exceeding the default must use 'new_aligned' instead"
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
**/
macro new(Allocator allocator, $Type, ...) @nodiscard
{
@@ -165,7 +165,7 @@ macro new(Allocator allocator, $Type, ...) @nodiscard
/**
* @require $Type.alignof <= mem::DEFAULT_MEM_ALIGNMENT : "Types with alignment exceeding the default must use 'new_aligned' instead"
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
**/
macro new_try(Allocator allocator, $Type, ...) @nodiscard
{
@@ -182,7 +182,7 @@ macro new_try(Allocator allocator, $Type, ...) @nodiscard
* Allocate using an aligned allocation. This is necessary for types with a default memory alignment
* exceeding DEFAULT_MEM_ALIGNMENT. IMPORTANT! It must be freed using free_aligned.
* @require $vacount < 2 : "Too many arguments."
* @require $or($vacount == 0, $assignable($vaexpr(0), $Type)) : "The second argument must be an initializer for the type"
* @require $vacount == 0 ||| $assignable($vaexpr(0), $Type) : "The second argument must be an initializer for the type"
**/
macro new_aligned($Type, ...) @nodiscard
{

View File

@@ -30,7 +30,7 @@ macro promote_int_same(x, y)
{
$if @is_int(x):
$switch
$case $and(@is_vector(y), $typeof(y).inner == float.typeid):
$case @is_vector(y) &&& $typeof(y).inner == float.typeid:
return (float)x;
$case $typeof(y).typeid == float.typeid:
return (float)x;

View File

@@ -54,7 +54,7 @@ struct Stack_t
extern fn CInt sigaltstack(Stack_t* ss, Stack_t* old_ss);
extern fn CInt sigaction(CInt signum, Sigaction *action, Sigaction *oldaction);
module libc::termios @if($and(env::LIBC, env::POSIX));
module libc::termios @if(env::LIBC &&& env::POSIX);
distinct Cc = char;
distinct Speed = CUInt;

View File

@@ -1,4 +1,4 @@
module libc::termios @if($and(env::LIBC, env::POSIX));
module libc::termios @if(env::LIBC &&& env::POSIX);
fn int sendBreak(Fd fd, int duration) => tcsendbreak(fd, duration);
fn int drain(Fd fd) => tcdrain(fd);
@@ -11,7 +11,7 @@ fn int Termios.setISpeed(Termios* self, Speed speed) => cfsetispeed(self, speed)
fn int Termios.getAttr(Termios* self, Fd fd) => tcgetattr(fd, self);
fn int Termios.setAttr(Termios* self, Fd fd, int optional_actions) => tcsetattr(fd, optional_actions, self);
module libc::termios @if($or(!env::LIBC, !env::POSIX));
module libc::termios @if(!env::LIBC ||| !env::POSIX);
distinct Cc = char;
distinct Speed = CUInt;

View File

@@ -32,7 +32,7 @@ def Indexs = char[256] @private;
def ElementType = $typeof(Type{}[0]);
const bool NO_KEY_FN @private = types::is_same(KeyFn, EmptySlot);
const bool KEY_BY_VALUE @private = $or(NO_KEY_FN, $assignable(Type{}[0], $typefrom(KeyFn.params[0])));
const bool KEY_BY_VALUE @private = NO_KEY_FN ||| $assignable(Type{}[0], $typefrom(KeyFn.params[0]));
const bool LIST_HAS_REF @private = $defined(&Type{}[0]);
def KeyFnReturnType = $typefrom(KeyFn.returns) @if(!NO_KEY_FN);

View File

@@ -20,7 +20,7 @@ fn void isort(Type list, usz low, usz high, CmpFn comp, Context context)
{
var $has_cmp = @is_valid_macro_slot(comp);
var $has_context = @is_valid_macro_slot(context);
var $cmp_by_value = $and($has_cmp, $assignable(list[0], $typefrom(CmpFn.params[0])));
var $cmp_by_value = $has_cmp &&& $assignable(list[0], $typefrom(CmpFn.params[0]));
var $has_get_ref = $defined(&list[0]);
for (usz i = low; i < high; ++i)
{

View File

@@ -31,7 +31,8 @@ fn void qsort(Type list, isz low, isz high, CmpFn cmp, Context context)
{
var $has_cmp = @is_valid_macro_slot(cmp);
var $has_context = @is_valid_macro_slot(context);
var $cmp_by_value = $and($has_cmp, $assignable(list[0], $typefrom(CmpFn.params[0])));
var $cmp_by_value = $has_cmp &&& $assignable(list[0], $typefrom(CmpFn.params[0]));
if (low >= 0 && high >= 0 && low < high)
{
Stack stack;

View File

@@ -20,7 +20,7 @@ macro bool @is_sortable(#list)
return false;
$default:
return true;
$endswitch;
$endswitch
}
macro bool @is_valid_context(#cmp, #context)
@@ -34,7 +34,7 @@ macro bool @is_valid_cmp_fn(#cmp, #list, #context)
var $no_context = @is_empty_macro_slot(#context);
$switch
$case @is_empty_macro_slot(#cmp): return true;
$case $or($Type.kindof != FUNC, $Type.returns.kindof != SIGNED_INT): return false;
$case $Type.kindof != FUNC ||| $Type.returns.kindof != SIGNED_INT: return false;
$case $defined(#cmp(#list[0], #list[0], #context)): return true;
$case $defined(#cmp(#list[0], #list[0])): return $no_context;
$case $defined(#cmp(&#list[0], &#list[0], #context)): return true;

View File

@@ -23,6 +23,7 @@
- Add a `--run-once` option to delete the output file after running it.
- Add `@const` attribute for macros, for better error messages with constant macros.
- Add `wincrt` setting to libraries.
- Add `+++` `&&&` `|||` as replacements for `$concat`, `$and`, and `$or`.
### Fixes
@@ -38,6 +39,8 @@
- Distinct inline would not implement protocol if the inlined implemented it. #1292
- Distinct inline can now be called if it is aliasing a function pointer.
- Bug in List add_array when reserving memory.
- Fix issue where a compile-time parameter is followed by "...".
- Fix issue with some conversions to untyped list.
### Stdlib changes

View File

@@ -192,6 +192,9 @@ BinaryOp binary_op[TOKEN_LAST + 1] = {
[TOKEN_SHR] = BINARYOP_SHR,
[TOKEN_AND] = BINARYOP_AND,
[TOKEN_OR] = BINARYOP_OR,
[TOKEN_CT_AND] = BINARYOP_CT_AND,
[TOKEN_CT_OR] = BINARYOP_CT_OR,
[TOKEN_CT_CONCAT] = BINARYOP_CT_CONCAT,
[TOKEN_QUESTQUEST] = BINARYOP_ELSE,
[TOKEN_AMP] = BINARYOP_BIT_AND,
[TOKEN_BIT_OR] = BINARYOP_BIT_OR,

View File

@@ -2317,6 +2317,7 @@ void expr_rewrite_to_binary(Expr *expr_to_rewrite, Expr *left, Expr *right, Bina
bool expr_const_in_range(const ExprConst *left, const ExprConst *right, const ExprConst *right_to);
bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp op);
void expr_contract_array(ExprConst *expr_const, ConstKind contract_type);
bool expr_const_will_overflow(const ExprConst *expr, TypeKind kind);
const char *expr_const_to_error_string(const ExprConst *expr);
bool expr_const_float_fits_type(const ExprConst *expr_const, TypeKind kind);

View File

@@ -34,6 +34,9 @@ typedef enum
BINARYOP_AND,
BINARYOP_OR,
BINARYOP_ELSE,
BINARYOP_CT_AND,
BINARYOP_CT_OR,
BINARYOP_CT_CONCAT,
// Don't change the ordering for GT to EQ or things will break
BINARYOP_GT,
BINARYOP_GE,
@@ -487,7 +490,9 @@ typedef enum
TOKEN_ELLIPSIS, // ...
TOKEN_SHL_ASSIGN, // <<=
TOKEN_SHR_ASSIGN, // >>=
TOKEN_CT_AND, // &&&
TOKEN_CT_CONCAT, // +++
TOKEN_CT_OR, // |||
// Literals.
TOKEN_IDENT, // Any normal ident.
TOKEN_CONST_IDENT, // Any purely uppercase ident,
@@ -587,12 +592,12 @@ typedef enum
TOKEN_LAST_NON_CT_KEYWORD = TOKEN_WHILE,
TOKEN_CT_ALIGNOF, // $alignof
TOKEN_CT_AND, // $and
TOKEN_CT_ANDFN, // $and
TOKEN_CT_APPEND, // $append
TOKEN_CT_ASSERT, // $assert
TOKEN_CT_ASSIGNABLE, // $assignable
TOKEN_CT_CASE, // $case
TOKEN_CT_CONCAT, // $concat
TOKEN_CT_CONCATFN, // $concat
TOKEN_CT_DEFAULT, // $default
TOKEN_CT_DEFINED, // $defined
TOKEN_CT_ECHO, // $echo
@@ -615,7 +620,7 @@ typedef enum
TOKEN_CT_IS_CONST, // $is_const
TOKEN_CT_NAMEOF, // $nameof
TOKEN_CT_OFFSETOF, // $offsetof
TOKEN_CT_OR, // $or
TOKEN_CT_ORFN, // $or
TOKEN_CT_QNAMEOF, // $qnameof
TOKEN_CT_SIZEOF, // $sizeof
TOKEN_CT_STRINGIFY, // $stringify
@@ -699,6 +704,7 @@ typedef enum
TYPE_BITSTRUCT,
TYPE_FAULTTYPE,
TYPE_TYPEDEF,
TYPE_UNTYPED_LIST,
TYPE_SLICE,
TYPE_ARRAY,
TYPE_FIRST_ARRAYLIKE = TYPE_ARRAY,
@@ -707,7 +713,6 @@ typedef enum
TYPE_VECTOR,
TYPE_INFERRED_VECTOR,
TYPE_LAST_ARRAYLIKE = TYPE_INFERRED_VECTOR,
TYPE_UNTYPED_LIST,
TYPE_OPTIONAL,
TYPE_WILDCARD,
TYPE_TYPEINFO,

View File

@@ -179,7 +179,7 @@ static bool add_error_token_at_current(Lexer *lexer, const char *message, ...)
}
// Add a new regular token.
static inline bool return_token(Lexer *lexer, TokenType type, const char *string)
static inline bool new_token(Lexer *lexer, TokenType type, const char *string)
{
set_generic_token(lexer, type);
lexer->data.string = string;
@@ -341,7 +341,7 @@ static inline bool scan_ident(Lexer *lexer, TokenType normal, TokenType const_to
uint32_t len = (uint32_t)(lexer->current - lexer->lexing_start);
if (!type)
{
if (!prefix && len == 1) return return_token(lexer, TOKEN_UNDERSCORE, "_");
if (!prefix && len == 1) return new_token(lexer, TOKEN_UNDERSCORE, "_");
if (prefix && len == 1)
{
return add_error_token(lexer, "An identifier was expected after the '%c'.", prefix);
@@ -357,7 +357,7 @@ static inline bool scan_ident(Lexer *lexer, TokenType normal, TokenType const_to
default:
break;
}
return return_token(lexer, type, interned_string);
return new_token(lexer, type, interned_string);
}
// --- Number scanning
@@ -446,7 +446,7 @@ static bool scan_oct(Lexer *lexer)
{
return add_error_token(lexer, "Octal literals cannot have a floating point suffix.");
}
return return_token(lexer, TOKEN_INTEGER, lexer->lexing_start);
return new_token(lexer, TOKEN_INTEGER, lexer->lexing_start);
}
/**
@@ -470,7 +470,7 @@ static bool scan_binary(Lexer *lexer)
{
return add_error_token(lexer, "Binary literals cannot have a floating point suffix.");
}
return return_token(lexer, TOKEN_INTEGER, lexer->lexing_start);
return new_token(lexer, TOKEN_INTEGER, lexer->lexing_start);
}
/**
@@ -541,7 +541,7 @@ static inline bool scan_hex(Lexer *lexer)
return add_error_token_at_current(lexer, "The number ended with '_', which isn't allowed, please remove it.");
}
if (!scan_number_suffix(lexer, &is_float)) return false;
return return_token(lexer, is_float ? TOKEN_REAL : TOKEN_INTEGER, lexer->lexing_start);
return new_token(lexer, is_float ? TOKEN_REAL : TOKEN_INTEGER, lexer->lexing_start);
}
/**
@@ -588,7 +588,7 @@ static inline bool scan_dec(Lexer *lexer)
return add_error_token_at_current(lexer, "The number ended with '_', which isn't allowed, please remove it.");
}
if (!scan_number_suffix(lexer, &is_float)) return false;
return return_token(lexer, is_float ? TOKEN_REAL : TOKEN_INTEGER, lexer->lexing_start);
return new_token(lexer, is_float ? TOKEN_REAL : TOKEN_INTEGER, lexer->lexing_start);
}
/**
@@ -989,7 +989,7 @@ static inline bool scan_string(Lexer *lexer)
// Skip the `"`
next(lexer);
destination[len] = 0;
return_token(lexer, TOKEN_STRING, destination);
new_token(lexer, TOKEN_STRING, destination);
lexer->data.strlen = len;
return true;
}
@@ -1025,7 +1025,7 @@ static inline bool scan_raw_string(Lexer *lexer)
destination[len++] = c;
}
destination[len] = 0;
return_token(lexer, TOKEN_STRING, destination);
new_token(lexer, TOKEN_STRING, destination);
lexer->data.strlen = len;
return true;
}
@@ -1069,7 +1069,7 @@ static inline bool scan_hex_array(Lexer *lexer)
{
return add_error_token(lexer, "The hexadecimal string is not an even length, did you miss a digit somewhere?");
}
if (!return_token(lexer, TOKEN_BYTES, lexer->lexing_start)) return false;
if (!new_token(lexer, TOKEN_BYTES, lexer->lexing_start)) return false;
lexer->data.is_base64 = false;
lexer->data.bytes_len = (uint64_t)len / 2;
return true;
@@ -1149,7 +1149,7 @@ static inline bool scan_base64(Lexer *lexer)
"- only need 1 or 2 bytes of extra padding.");
}
uint64_t decoded_len = (3 * len - end_len) / 4;
if (!return_token(lexer, TOKEN_BYTES, lexer->lexing_start)) return false;
if (!new_token(lexer, TOKEN_BYTES, lexer->lexing_start)) return false;
lexer->data.is_base64 = true;
lexer->data.bytes_len = decoded_len;
return true;
@@ -1215,7 +1215,7 @@ RETRY:;
{
lexer->mode = LEX_NORMAL;
next(lexer);
return return_token(lexer, TOKEN_DOCS_END, "*/");
return new_token(lexer, TOKEN_DOCS_END, "*/");
}
// We need to skip any space afterwards
@@ -1240,7 +1240,7 @@ RETRY:;
{
next(lexer);
lexer->mode = LEX_NORMAL;
return return_token(lexer, TOKEN_DOCS_END, "*/");
return new_token(lexer, TOKEN_DOCS_END, "*/");
}
// If we find the end of the line we start from the beginning.
if (c == '\n')
@@ -1263,7 +1263,7 @@ EOF_REACHED:
static bool parse_doc_start(Lexer *lexer)
{
// Add the doc start token.
return_token(lexer, TOKEN_DOCS_START, lexer->lexing_start);
new_token(lexer, TOKEN_DOCS_START, lexer->lexing_start);
skip_to_doc_line_end(lexer);
lexer->mode = LEX_DOCS;
return true;
@@ -1277,7 +1277,7 @@ static bool lexer_scan_token_inner(Lexer *lexer)
// Point start to the first non-whitespace character.
begin_new_token(lexer);
if (reached_end(lexer)) return return_token(lexer, TOKEN_EOF, "\n") && false;
if (reached_end(lexer)) return new_token(lexer, TOKEN_EOF, "\n") && false;
char c = peek(lexer);
next(lexer);
@@ -1290,7 +1290,7 @@ static bool lexer_scan_token_inner(Lexer *lexer)
{
return scan_ident(lexer, TOKEN_AT_IDENT, TOKEN_AT_CONST_IDENT, TOKEN_AT_TYPE_IDENT, '@');
}
return return_token(lexer, TOKEN_AT, "@");
return new_token(lexer, TOKEN_AT, "@");
case '\'':
return scan_char(lexer);
case '`':
@@ -1304,42 +1304,42 @@ static bool lexer_scan_token_inner(Lexer *lexer)
{
if (char_is_letter(peek(lexer)))
{
return return_token(lexer, TOKEN_BUILTIN, "$$");
return new_token(lexer, TOKEN_BUILTIN, "$$");
}
return add_error_token_at_current(lexer, "Expected a letter after $$.");
}
return scan_ident(lexer, TOKEN_CT_IDENT, TOKEN_CT_CONST_IDENT, TOKEN_CT_TYPE_IDENT, '$');
case ',':
return return_token(lexer, TOKEN_COMMA, ",");
return new_token(lexer, TOKEN_COMMA, ",");
case ';':
return return_token(lexer, TOKEN_EOS, ";");
return new_token(lexer, TOKEN_EOS, ";");
case '{':
return match(lexer, '|') ? return_token(lexer, TOKEN_LBRAPIPE, "{|") : return_token(lexer, TOKEN_LBRACE, "{");
return match(lexer, '|') ? new_token(lexer, TOKEN_LBRAPIPE, "{|") : new_token(lexer, TOKEN_LBRACE, "{");
case '}':
return return_token(lexer, TOKEN_RBRACE, "}");
return new_token(lexer, TOKEN_RBRACE, "}");
case '(':
return match(lexer, '<') ? return_token(lexer, TOKEN_LGENPAR, "(<") : return_token(lexer, TOKEN_LPAREN, "(");
return match(lexer, '<') ? new_token(lexer, TOKEN_LGENPAR, "(<") : new_token(lexer, TOKEN_LPAREN, "(");
case ')':
return return_token(lexer, TOKEN_RPAREN, ")");
return new_token(lexer, TOKEN_RPAREN, ")");
case '[':
if (match(lexer, '<')) return return_token(lexer, TOKEN_LVEC, "[<");
return return_token(lexer, TOKEN_LBRACKET, "[");
if (match(lexer, '<')) return new_token(lexer, TOKEN_LVEC, "[<");
return new_token(lexer, TOKEN_LBRACKET, "[");
case ']':
return return_token(lexer, TOKEN_RBRACKET, "]");
return new_token(lexer, TOKEN_RBRACKET, "]");
case '.':
if (match(lexer, '.'))
{
if (match(lexer, '.')) return return_token(lexer, TOKEN_ELLIPSIS, "...");
return return_token(lexer, TOKEN_DOTDOT, "..");
if (match(lexer, '.')) return new_token(lexer, TOKEN_ELLIPSIS, "...");
return new_token(lexer, TOKEN_DOTDOT, "..");
}
return return_token(lexer, TOKEN_DOT, ".");
return new_token(lexer, TOKEN_DOT, ".");
case '~':
return return_token(lexer, TOKEN_BIT_NOT, "~");
return new_token(lexer, TOKEN_BIT_NOT, "~");
case ':':
return match(lexer, ':') ? return_token(lexer, TOKEN_SCOPE, "::") : return_token(lexer, TOKEN_COLON, ":");
return match(lexer, ':') ? new_token(lexer, TOKEN_SCOPE, "::") : new_token(lexer, TOKEN_COLON, ":");
case '!':
if (match(lexer, '!')) return return_token(lexer, TOKEN_BANGBANG, "!!");
return match(lexer, '=') ? return_token(lexer, TOKEN_NOT_EQUAL, "!=") : return_token(lexer, TOKEN_BANG, "!");
if (match(lexer, '!')) return new_token(lexer, TOKEN_BANGBANG, "!!");
return match(lexer, '=') ? new_token(lexer, TOKEN_NOT_EQUAL, "!=") : new_token(lexer, TOKEN_BANG, "!");
case '/':
// We can't get any directives comments here.
if (lexer->mode != LEX_DOCS && match(lexer, '*'))
@@ -1348,59 +1348,62 @@ static bool lexer_scan_token_inner(Lexer *lexer)
next(lexer);
return parse_doc_start(lexer);
}
return match(lexer, '=') ? return_token(lexer, TOKEN_DIV_ASSIGN, "/=") : return_token(lexer, TOKEN_DIV, "/");
return match(lexer, '=') ? new_token(lexer, TOKEN_DIV_ASSIGN, "/=") : new_token(lexer, TOKEN_DIV, "/");
case '*':
return match(lexer, '=') ? return_token(lexer, TOKEN_MULT_ASSIGN, "*=") : return_token(lexer, TOKEN_STAR, "*");
return match(lexer, '=') ? new_token(lexer, TOKEN_MULT_ASSIGN, "*=") : new_token(lexer, TOKEN_STAR, "*");
case '=':
if (match(lexer, '>')) return return_token(lexer, TOKEN_IMPLIES, "=>");
return match(lexer, '=') ? return_token(lexer, TOKEN_EQEQ, "==") : return_token(lexer, TOKEN_EQ, "=");
if (match(lexer, '>')) return new_token(lexer, TOKEN_IMPLIES, "=>");
return match(lexer, '=') ? new_token(lexer, TOKEN_EQEQ, "==") : new_token(lexer, TOKEN_EQ, "=");
case '^':
return match(lexer, '=') ? return_token(lexer, TOKEN_BIT_XOR_ASSIGN, "^=") : return_token(lexer,
TOKEN_BIT_XOR,
"^");
return match(lexer, '=') ? new_token(lexer, TOKEN_BIT_XOR_ASSIGN, "^=") : new_token(lexer, TOKEN_BIT_XOR, "^");
case '?':
if (match(lexer, '?')) return return_token(lexer, TOKEN_QUESTQUEST, "??");
return match(lexer, ':') ? return_token(lexer, TOKEN_ELVIS, "?:") : return_token(lexer, TOKEN_QUESTION, "?");
if (match(lexer, '?')) return new_token(lexer, TOKEN_QUESTQUEST, "??");
return match(lexer, ':') ? new_token(lexer, TOKEN_ELVIS, "?:") : new_token(lexer, TOKEN_QUESTION, "?");
case '<':
if (match(lexer, '<'))
{
if (match(lexer, '=')) return return_token(lexer, TOKEN_SHL_ASSIGN, "<<=");
return return_token(lexer, TOKEN_SHL, "<<");
if (match(lexer, '=')) return new_token(lexer, TOKEN_SHL_ASSIGN, "<<=");
return new_token(lexer, TOKEN_SHL, "<<");
}
return match(lexer, '=') ? return_token(lexer, TOKEN_LESS_EQ, "<=") : return_token(lexer, TOKEN_LESS, "<");
return match(lexer, '=') ? new_token(lexer, TOKEN_LESS_EQ, "<=") : new_token(lexer, TOKEN_LESS, "<");
case '>':
if (match(lexer, '>'))
{
if (match(lexer, '=')) return return_token(lexer, TOKEN_SHR_ASSIGN, ">>=");
return return_token(lexer, TOKEN_SHR, ">>");
if (match(lexer, '=')) return new_token(lexer, TOKEN_SHR_ASSIGN, ">>=");
return new_token(lexer, TOKEN_SHR, ">>");
}
if (match(lexer, ')')) return return_token(lexer, TOKEN_RGENPAR, ">)");
if (match(lexer, ']')) return return_token(lexer, TOKEN_RVEC, ">]");
return match(lexer, '=') ? return_token(lexer, TOKEN_GREATER_EQ, ">=") : return_token(lexer,
TOKEN_GREATER,
">");
if (match(lexer, ')')) return new_token(lexer, TOKEN_RGENPAR, ">)");
if (match(lexer, ']')) return new_token(lexer, TOKEN_RVEC, ">]");
return match(lexer, '=') ? new_token(lexer, TOKEN_GREATER_EQ, ">=") : new_token(lexer, TOKEN_GREATER, ">");
case '%':
return match(lexer, '=') ? return_token(lexer, TOKEN_MOD_ASSIGN, "%=") : return_token(lexer, TOKEN_MOD, "%");
return match(lexer, '=') ? new_token(lexer, TOKEN_MOD_ASSIGN, "%=") : new_token(lexer, TOKEN_MOD, "%");
case '&':
if (match(lexer, '&')) return return_token(lexer, TOKEN_AND, "&&");
return match(lexer, '=') ? return_token(lexer, TOKEN_BIT_AND_ASSIGN, "&=") : return_token(lexer,
TOKEN_AMP,
"&");
if (match(lexer, '&'))
{
return match(lexer, '&') ? new_token(lexer, TOKEN_CT_AND, "&&&") : new_token(lexer, TOKEN_AND, "&&");
}
return match(lexer, '=') ? new_token(lexer, TOKEN_BIT_AND_ASSIGN, "&=") : new_token(lexer, TOKEN_AMP, "&");
case '|':
if (match(lexer, '}')) return return_token(lexer, TOKEN_RBRAPIPE, "|}");
if (match(lexer, '|')) return return_token(lexer, TOKEN_OR, "||");
return match(lexer, '=') ? return_token(lexer, TOKEN_BIT_OR_ASSIGN, "|=") : return_token(lexer,
TOKEN_BIT_OR,
if (match(lexer, '}')) return new_token(lexer, TOKEN_RBRAPIPE, "|}");
if (match(lexer, '|'))
{
return match(lexer, '|') ? new_token(lexer, TOKEN_CT_OR, "|||") : new_token(lexer, TOKEN_OR, "||");
}
return match(lexer, '=') ? new_token(lexer, TOKEN_BIT_OR_ASSIGN, "|=") : new_token(lexer, TOKEN_BIT_OR,
"|");
case '+':
if (match(lexer, '+')) return return_token(lexer, TOKEN_PLUSPLUS, "++");
if (match(lexer, '=')) return return_token(lexer, TOKEN_PLUS_ASSIGN, "+=");
return return_token(lexer, TOKEN_PLUS, "+");
if (match(lexer, '+'))
{
if (match(lexer, '+')) return new_token(lexer, TOKEN_CT_CONCAT, "+++");
return new_token(lexer, TOKEN_PLUSPLUS, "++");
}
if (match(lexer, '=')) return new_token(lexer, TOKEN_PLUS_ASSIGN, "+=");
return new_token(lexer, TOKEN_PLUS, "+");
case '-':
if (match(lexer, '>')) return return_token(lexer, TOKEN_ARROW, "->");
if (match(lexer, '-')) return return_token(lexer, TOKEN_MINUSMINUS, "--");
if (match(lexer, '=')) return return_token(lexer, TOKEN_MINUS_ASSIGN, "-=");
return return_token(lexer, TOKEN_MINUS, "-");
if (match(lexer, '>')) return new_token(lexer, TOKEN_ARROW, "->");
if (match(lexer, '-')) return new_token(lexer, TOKEN_MINUSMINUS, "--");
if (match(lexer, '=')) return new_token(lexer, TOKEN_MINUS_ASSIGN, "-=");
return new_token(lexer, TOKEN_MINUS, "-");
case 'x':
if ((peek(lexer) == '"' || peek(lexer) == '\''))
{

View File

@@ -4408,6 +4408,9 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs
case BINARYOP_BIT_XOR_ASSIGN:
case BINARYOP_SHR_ASSIGN:
case BINARYOP_SHL_ASSIGN:
case BINARYOP_CT_AND:
case BINARYOP_CT_OR:
case BINARYOP_CT_CONCAT:
// Handled elsewhere.
UNREACHABLE
}

View File

@@ -49,11 +49,87 @@ static inline bool compare_fps(Real left, Real right, BinaryOp op)
UNREACHABLE
}
}
// Contract a constant array-like value into a bytes-like constant
// (`contract_type` is expected to be CONST_BYTES or CONST_STRING — TODO confirm),
// overwriting *expr_const in place with the converted constant.
void expr_contract_array(ExprConst *expr_const, ConstKind contract_type)
{
	// NOTE(review): a CONST_POINTER here is presumably a null/empty slice
	// constant — it contracts to an empty bytes/string value. Verify against callers.
	if (expr_const->const_kind == CONST_POINTER)
	{
		*expr_const = (ExprConst) { .const_kind = contract_type };
		return;
	}
	assert(expr_const->const_kind == CONST_INITIALIZER);
	Type *type = type_flatten(expr_const->initializer->type);
	assert(type_is_any_arraylike(type));
	ArraySize len = type->array.len;
	// Zero-length array: nothing to copy, produce an empty constant.
	if (!len)
	{
		*expr_const = (ExprConst) { .const_kind = contract_type };
		return;
	}
	// Zero-initialized buffer (presumably arena-lifetime — TODO confirm), so
	// CONST_INIT_ZERO and any index a sparse initializer skips remain 0.
	char *arr = calloc_arena(len);
	ConstInitializer *initializer = expr_const->initializer;
	switch (initializer->kind)
	{
		case CONST_INIT_ZERO:
			// Buffer is already all zeroes.
			break;
		case CONST_INIT_STRUCT:
		case CONST_INIT_UNION:
		case CONST_INIT_VALUE:
		case CONST_INIT_ARRAY_VALUE:
			// Guarded above: the initializer's type is array-like, so these
			// top-level kinds cannot occur here.
			UNREACHABLE
		case CONST_INIT_ARRAY:
		{
			// Sparse array initializer: each element records its own index.
			FOREACH(ConstInitializer *, init, initializer->init_array.elements)
			{
				assert(init->kind == CONST_INIT_ARRAY_VALUE);
				// Truncate each element's integer constant to a single byte.
				arr[init->init_array_value.index] = (char) int_to_i64(init->init_array_value.element->init_value->const_expr.ixx);
			}
			break;
		}
		case CONST_INIT_ARRAY_FULL:
		{
			// Dense initializer: one value per element, in declaration order.
			FOREACH_IDX(i, ConstInitializer *, init, initializer->init_array_full)
			{
				assert(init->kind == CONST_INIT_VALUE);
				arr[i] = (char)int_to_i64(init->init_value->const_expr.ixx);
			}
			break;
		}
	}
	// Replace the initializer constant with the contracted byte data.
	*expr_const = (ExprConst) { .const_kind = contract_type, .bytes.ptr = arr, .bytes.len = len };
}
// True when the constant kind carries raw byte data: a bytes constant
// or a string constant.
INLINE bool const_is_bytes(ConstKind kind)
{
	switch (kind)
	{
		case CONST_BYTES:
		case CONST_STRING:
			return true;
		default:
			return false;
	}
}
bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp op)
{
bool is_eq;
assert(left->const_kind == right->const_kind);
ConstKind left_kind = left->const_kind;
ConstKind right_kind = right->const_kind;
ExprConst replace;
if (left_kind != right_kind)
{
if (const_is_bytes(left_kind))
{
if (!const_is_bytes(right_kind))
{
replace = *right;
expr_contract_array(&replace, left_kind);
right = &replace;
}
}
else if (const_is_bytes(right_kind))
{
if (!const_is_bytes(left_kind))
{
replace = *left;
expr_contract_array(&replace, right_kind);
left = &replace;
}
}
}
switch (left->const_kind)
{
case CONST_BOOL:

View File

@@ -1135,7 +1135,7 @@ static Expr *parse_ct_embed(ParseContext *c, Expr *left)
static Expr *parse_ct_concat_append(ParseContext *c, Expr *left)
{
assert(!left && "Unexpected left hand side");
Expr *expr = EXPR_NEW_TOKEN(tok_is(c, TOKEN_CT_CONCAT) ? EXPR_CT_CONCAT : EXPR_CT_APPEND);
Expr *expr = EXPR_NEW_TOKEN(tok_is(c, TOKEN_CT_CONCATFN) ? EXPR_CT_CONCAT : EXPR_CT_APPEND);
advance(c);
CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr);
@@ -1170,7 +1170,7 @@ static Expr *parse_ct_and_or(ParseContext *c, Expr *left)
{
assert(!left && "Unexpected left hand side");
Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_AND_OR);
expr->ct_and_or_expr.is_and = tok_is(c, TOKEN_CT_AND);
expr->ct_and_or_expr.is_and = tok_is(c, TOKEN_CT_ANDFN);
advance(c);
CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr);
if (!parse_expr_list(c, &expr->ct_and_or_expr.args, TOKEN_RPAREN)) return poisoned_expr;
@@ -1956,7 +1956,10 @@ ParseRule rules[TOKEN_EOF + 1] = {
[TOKEN_STRING] = { parse_string_literal, NULL, PREC_NONE },
[TOKEN_REAL] = { parse_double, NULL, PREC_NONE },
[TOKEN_OR] = { NULL, parse_binary, PREC_OR },
[TOKEN_CT_OR] = { NULL, parse_binary, PREC_OR },
[TOKEN_CT_CONCAT] = { NULL, parse_binary, PREC_MULTIPLICATIVE },
[TOKEN_AND] = { parse_unary_expr, parse_binary, PREC_AND },
[TOKEN_CT_AND] = { parse_unary_expr, parse_binary, PREC_AND },
[TOKEN_EQ] = { NULL, parse_binary, PREC_ASSIGNMENT },
[TOKEN_PLUS_ASSIGN] = { NULL, parse_binary, PREC_ASSIGNMENT },
[TOKEN_MINUS_ASSIGN] = { NULL, parse_binary, PREC_ASSIGNMENT },
@@ -1980,11 +1983,11 @@ ParseRule rules[TOKEN_EOF + 1] = {
//[TOKEN_HASH_TYPE_IDENT] = { parse_type_identifier, NULL, PREC_NONE }
[TOKEN_FN] = { parse_lambda, NULL, PREC_NONE },
[TOKEN_CT_CONCAT] = { parse_ct_concat_append, NULL, PREC_NONE },
[TOKEN_CT_CONCATFN] = {parse_ct_concat_append, NULL, PREC_NONE },
[TOKEN_CT_APPEND] = { parse_ct_concat_append, NULL, PREC_NONE },
[TOKEN_CT_SIZEOF] = { parse_ct_sizeof, NULL, PREC_NONE },
[TOKEN_CT_ALIGNOF] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_AND] = {parse_ct_and_or, NULL, PREC_NONE },
[TOKEN_CT_ANDFN] = {parse_ct_and_or, NULL, PREC_NONE },
[TOKEN_CT_ASSIGNABLE] = { parse_ct_castable, NULL, PREC_NONE },
[TOKEN_CT_DEFINED] = { parse_ct_defined, NULL, PREC_NONE },
[TOKEN_CT_IS_CONST] = {parse_ct_is_const, NULL, PREC_NONE },
@@ -1993,7 +1996,7 @@ ParseRule rules[TOKEN_EOF + 1] = {
[TOKEN_CT_FEATURE] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_EXTNAMEOF] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_OFFSETOF] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_OR] = {parse_ct_and_or, NULL, PREC_NONE },
[TOKEN_CT_ORFN] = {parse_ct_and_or, NULL, PREC_NONE },
[TOKEN_CT_NAMEOF] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_QNAMEOF] = { parse_ct_call, NULL, PREC_NONE },
[TOKEN_CT_TYPEFROM] = { parse_type_expr, NULL, PREC_NONE },

View File

@@ -1273,7 +1273,7 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
// Did we get Foo... foo...
if (ellipsis)
{
PRINT_ERROR_HERE("Unexpected '...' following a vararg declaration.");
PRINT_ERROR_LAST("Unexpected '...' following a vararg declaration.");
return false;
}
ellipsis = true;
@@ -1285,7 +1285,7 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
// Did we get Foo foo...? If so then that's an error.
if (type)
{
PRINT_ERROR_HERE("For typed varargs '...', needs to appear after the type.");
PRINT_ERROR_LAST("For typed varargs '...', needs to appear after the type.");
return false;
}
// This is "foo..."
@@ -1299,9 +1299,9 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
name = symstr(c);
advance_and_verify(c, TOKEN_CT_IDENT);
// This will catch Type... $foo and $foo..., neither is allowed.
if (ellipsis || peek(c) == TOKEN_ELLIPSIS)
if (ellipsis || tok_is(c, TOKEN_ELLIPSIS))
{
PRINT_ERROR_HERE("Compile time parameters may not be varargs, use untyped macro varargs '...' instead.");
PRINT_ERROR_LAST("Compile time parameters may not be varargs, use untyped macro varargs '...' instead.");
return false;
}
param_kind = VARDECL_PARAM_CT;
@@ -1316,9 +1316,9 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
return false;
}
// This will catch Type... &foo and &foo..., neither is allowed.
if (ellipsis || try_consume(c, TOKEN_ELLIPSIS))
if (ellipsis || tok_is(c, TOKEN_ELLIPSIS))
{
PRINT_ERROR_HERE("Reference parameters may not be varargs, use untyped macro varargs '...' instead.");
PRINT_ERROR_LAST("Reference parameters may not be varargs, use untyped macro varargs '...' instead.");
return false;
}
// Span includes the "&"
@@ -1333,9 +1333,9 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
// expression #foo
name = symstr(c);
advance_and_verify(c, TOKEN_HASH_IDENT);
if (ellipsis || try_consume(c, TOKEN_ELLIPSIS))
if (ellipsis || tok_is(c, TOKEN_ELLIPSIS))
{
PRINT_ERROR_HERE("Expression parameters may not be varargs, use untyped macro varargs '...' instead.");
PRINT_ERROR_LAST("Expression parameters may not be varargs, use untyped macro varargs '...' instead.");
return false;
}
param_kind = VARDECL_PARAM_EXPR;
@@ -1344,9 +1344,9 @@ bool parse_parameters(ParseContext *c, Decl ***params_ref, Decl **body_params,
case TOKEN_CT_TYPE_IDENT:
name = symstr(c);
advance_and_verify(c, TOKEN_CT_TYPE_IDENT);
if (ellipsis || try_consume(c, TOKEN_ELLIPSIS))
if (ellipsis || tok_is(c, TOKEN_ELLIPSIS))
{
PRINT_ERROR_HERE("Expression parameters may not be varargs, use untyped macro varargs '...' instead.");
PRINT_ERROR_LAST("Expression parameters may not be varargs, use untyped macro varargs '...' instead.");
return false;
}
param_kind = VARDECL_PARAM_CT_TYPE;

View File

@@ -1296,9 +1296,11 @@ Ast *parse_stmt(ParseContext *c)
case TOKEN_BYTES:
case TOKEN_CHAR_LITERAL:
case TOKEN_CT_ALIGNOF:
case TOKEN_CT_ANDFN:
case TOKEN_CT_AND:
case TOKEN_CT_APPEND:
case TOKEN_CT_ASSIGNABLE:
case TOKEN_CT_CONCATFN:
case TOKEN_CT_CONCAT:
case TOKEN_CT_CONST_IDENT:
case TOKEN_CT_IS_CONST:
@@ -1310,6 +1312,7 @@ Ast *parse_stmt(ParseContext *c)
case TOKEN_CT_IDENT:
case TOKEN_CT_NAMEOF:
case TOKEN_CT_OFFSETOF:
case TOKEN_CT_ORFN:
case TOKEN_CT_OR:
case TOKEN_CT_QNAMEOF:
case TOKEN_CT_SIZEOF:

View File

@@ -443,6 +443,11 @@ RETRY:
case BINARYOP_EQ:
// This type is bool, so check should never happen.
UNREACHABLE
case BINARYOP_CT_OR:
case BINARYOP_CT_AND:
case BINARYOP_CT_CONCAT:
// This should be folded already.
UNREACHABLE
}
UNREACHABLE
case EXPR_BUILTIN_ACCESS:

View File

@@ -6101,7 +6101,6 @@ static bool sema_expr_analyse_and_or(SemaContext *context, Expr *expr, Expr *lef
if (!sema_binary_analyse_subexpr(context, expr, left, right)) return false;
if (!cast_explicit(context, left, type_bool) || !cast_explicit(context, right, type_bool)) return false;
if (expr_both_const(left, right) && sema_constant_fold_ops(left))
{
if (expr->binary_expr.operator == BINARYOP_AND)
@@ -6878,6 +6877,513 @@ static inline bool sema_expr_analyse_or_error(SemaContext *context, Expr *expr)
return true;
}
// Fold a compile-time '&&&' / '|||' (BINARYOP_CT_AND / BINARYOP_CT_OR),
// evaluating operands lazily left-to-right. Both evaluated operands must
// fold to constant booleans; the expression is rewritten to a const bool.
static inline bool sema_expr_analyse_ct_and_or(SemaContext *context, Expr *expr, Expr *left, Expr *right)
{
	assert(expr->resolve_status == RESOLVE_RUNNING);
	const bool needs_true = expr->binary_expr.operator == BINARYOP_CT_AND;
	// The left operand is always evaluated and must be a constant bool.
	if (!sema_analyse_expr(context, left)) return false;
	if (!expr_is_const_bool(left)) RETURN_SEMA_ERROR(left, "Expected this to evaluate to a constant boolean.");
	// Short-circuit: 'false &&&' and 'true |||' decide the result without
	// ever analysing the right operand.
	if (left->const_expr.b != needs_true)
	{
		expr_rewrite_const_bool(expr, type_bool, !needs_true);
		return true;
	}
	// Left side was neutral, so the right operand determines the value.
	if (!sema_analyse_expr(context, right)) return false;
	if (!expr_is_const_bool(right)) RETURN_SEMA_ERROR(right, "Expected this to evaluate to a constant boolean.");
	expr_rewrite_const_bool(expr, type_bool, right->const_expr.b);
	return true;
}
// Classifies how two constant operands of a compile-time concatenation are
// joined (bytes/strings, typed arrays, or untyped lists).
// NOTE(review): not referenced anywhere in this chunk — possibly a leftover
// from the $concat -> '+++' rework (a duplicate definition is removed further
// down in this commit); confirm against the rest of the file before removing.
typedef enum ConcatType_
{
	CONCAT_UNKNOWN,
	CONCAT_JOIN_BYTES,
	CONCAT_JOIN_ARRAYS,
	CONCAT_JOIN_LISTS,
} ConcatType;
// Fold an append of several constant elements onto a constant array/vector,
// writing the resulting constant into 'expr'.
// 'exprs' appears to hold the original argument list with the list itself at
// index 0 — the loop below starts at index 1 and the length subtracts 1 —
// so the elements to append are exprs[1..]. (TODO confirm against callers.)
static bool sema_append_const_array_element(SemaContext *context, Expr *expr, Expr *list, Expr **exprs)
{
	// A constant pointer here stands in for an empty slice constant.
	bool is_empty_slice = list->const_expr.const_kind == CONST_POINTER;
	if (!is_empty_slice && list->const_expr.initializer->kind != CONST_INIT_ARRAY_FULL)
	{
		RETURN_SEMA_ERROR(list, "Only fully initialized arrays may be appended to.");
	}
	Type *array_type = type_flatten(list->type);
	// Preserve vector-ness when building the widened result type.
	bool is_vector = array_type->type_kind == TYPE_VECTOR || array_type->type_kind == TYPE_INFERRED_VECTOR;
	// New length = current length (0 for an empty slice) + appended element count.
	unsigned len = (is_empty_slice ? 0 : sema_get_const_len(context, list)) + vec_size(exprs) - 1;
	Type *indexed = type_get_indexed_type(list->type);
	Type *new_type = is_vector ? type_get_vector(indexed, len) : type_get_array(indexed, len);
	ConstInitializer *init = list->const_expr.initializer;
	ConstInitializer **inits = VECNEW(ConstInitializer*, len);
	// Start from a copy of the existing element initializers.
	if (!is_empty_slice)
	{
		FOREACH(ConstInitializer *, i, init->init_array_full) vec_add(inits, i);
	}
	unsigned elements = vec_size(exprs);
	for (unsigned i = 1; i < elements; i++)
	{
		Expr *element = exprs[i];
		// Each appended element is analysed against, and implicitly cast to,
		// the array's element type.
		if (!sema_analyse_inferred_expr(context, indexed, element)) return false;
		if (!cast_implicit(context, element, indexed, false)) return false;
		ConstInitializer *in = CALLOCS(ConstInitializer);
		in->kind = CONST_INIT_VALUE;
		in->init_value = element;
		vec_add(inits, in);
	}
	// Rewrite 'expr' in place as a fully initialized constant array/vector.
	expr->expr_kind = EXPR_CONST;
	expr->resolve_status = RESOLVE_DONE;
	expr->type = new_type;
	ConstInitializer *new_init = CALLOCS(ConstInitializer);
	new_init->init_array_full = inits;
	new_init->type = new_type;
	new_init->kind = CONST_INIT_ARRAY_FULL;
	expr->const_expr = (ExprConst) {
		.const_kind = CONST_INITIALIZER,
		.initializer = new_init
	};
	return true;
}
// Fold an append of a single, already-resolved element onto a constant
// array/vector, writing the resulting constant into 'expr'.
// NOTE(review): near-duplicate of sema_append_const_array_element with a
// single element — candidate for sharing a helper.
static bool sema_append_const_array_one(SemaContext *context, Expr *expr, Expr *list, Expr *element)
{
	// A constant pointer here stands in for an empty slice constant.
	bool is_empty_slice = list->const_expr.const_kind == CONST_POINTER;
	if (!is_empty_slice && list->const_expr.initializer->kind != CONST_INIT_ARRAY_FULL)
	{
		RETURN_SEMA_ERROR(list, "Only fully initialized arrays may be appended to.");
	}
	Type *array_type = type_flatten(list->type);
	// Preserve vector-ness when building the widened result type.
	bool is_vector = array_type->type_kind == TYPE_VECTOR || array_type->type_kind == TYPE_INFERRED_VECTOR;
	// New length = current length (0 for an empty slice) + 1.
	unsigned len = (is_empty_slice ? 0 : sema_get_const_len(context, list)) + 1;
	Type *indexed = type_get_indexed_type(list->type);
	Type *new_type = is_vector ? type_get_vector(indexed, len) : type_get_array(indexed, len);
	ConstInitializer *init = list->const_expr.initializer;
	ConstInitializer **inits = VECNEW(ConstInitializer*, len);
	// Start from a copy of the existing element initializers.
	if (!is_empty_slice)
	{
		FOREACH(ConstInitializer *, i, init->init_array_full) vec_add(inits, i);
	}
	// Unlike the multi-element variant, the element was resolved by the caller;
	// it only needs the implicit cast to the element type here.
	assert(element->resolve_status == RESOLVE_DONE);
	if (!cast_implicit(context, element, indexed, false)) return false;
	ConstInitializer *in = CALLOCS(ConstInitializer);
	in->kind = CONST_INIT_VALUE;
	in->init_value = element;
	vec_add(inits, in);
	// Rewrite 'expr' in place as a fully initialized constant array/vector.
	expr->expr_kind = EXPR_CONST;
	expr->resolve_status = RESOLVE_DONE;
	expr->type = new_type;
	ConstInitializer *new_init = CALLOCS(ConstInitializer);
	new_init->init_array_full = inits;
	new_init->type = new_type;
	new_init->kind = CONST_INIT_ARRAY_FULL;
	expr->const_expr = (ExprConst) {
		.const_kind = CONST_INITIALIZER,
		.initializer = new_init
	};
	return true;
}
// Concatenate two constant byte sequences 'a' and 'b' into a fresh constant,
// rewriting 'expr' as CONST_BYTES (char-array typed) or CONST_STRING (keeps
// 'type'). The buffer is NUL-terminated so it can double as a C string.
static bool sema_concat_const_bytes(SemaContext *context, Expr *expr, Type *type, bool is_bytes, const char *a, const char *b, ArraySize alen, ArraySize blen)
{
	Type *indexed = type_get_indexed_type(type);
	ArraySize total = alen + blen;
	// One arena block holds both halves plus the trailing NUL.
	char *buffer = malloc_arena(total + 1);
	if (alen) memcpy(buffer, a, alen);
	if (blen) memcpy(buffer + alen, b, blen);
	buffer[total] = '\0';
	expr->expr_kind = EXPR_CONST;
	expr->const_expr = (ExprConst) {
		.const_kind = is_bytes ? CONST_BYTES : CONST_STRING,
		.bytes.ptr = buffer,
		.bytes.len = total
	};
	expr->resolve_status = RESOLVE_DONE;
	// Bytes get a concrete array type of the combined length; strings keep 'type'.
	expr->type = is_bytes ? type_get_array(indexed, total) : type;
	return true;
}
// Append the unicode code point 'c' to the constant string 'a' by encoding it
// as UTF-8 (1-4 bytes) and delegating to the byte concatenation. The caller
// guarantees 0 <= c <= 0x10FFFF.
static bool sema_concat_character(SemaContext *context, Expr *expr, Type *type, const char *a, ArraySize alen, uint64_t c)
{
	char utf8[4];
	int utf8_len;
	if (c < 0x80)
	{
		// Single byte: ASCII as-is.
		utf8[0] = (char) c;
		utf8_len = 1;
	}
	else if (c < 0x800)
	{
		// Two bytes: 110xxxxx 10xxxxxx
		utf8[0] = (char) (0xC0 | (c >> 6));
		utf8[1] = (char) (0x80 | (c & 0x3F));
		utf8_len = 2;
	}
	else if (c < 0x10000)
	{
		// Three bytes: 1110xxxx 10xxxxxx 10xxxxxx
		utf8[0] = (char) (0xE0 | (c >> 12));
		utf8[1] = (char) (0x80 | ((c >> 6) & 0x3F));
		utf8[2] = (char) (0x80 | (c & 0x3F));
		utf8_len = 3;
	}
	else
	{
		// Four bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
		utf8[0] = (char) (0xF0 | (c >> 18));
		utf8[1] = (char) (0x80 | ((c >> 12) & 0x3F));
		utf8[2] = (char) (0x80 | ((c >> 6) & 0x3F));
		utf8[3] = (char) (0x80 | (c & 0x3F));
		utf8_len = 4;
	}
	return sema_concat_const_bytes(context, expr, type, false, a, utf8, alen, utf8_len);
}
/**
 * Concatenate a constant string/bytes left operand with another constant.
 *
 * 1. String + Bytes|String
 * 2. Bytes + Bytes|String
 * 3. Bytes + (i)char => Bytes
 * 4. String + character => String
 * 5. String + (i)char array/vector => String // Disallowed for now
 * 6. Bytes + (i)char array/vector => Bytes // Disallowed for now
 */
static bool sema_concat_bytes_and_other(SemaContext *context, Expr *expr, Expr *left, Expr *right)
{
	ArraySize len = left->const_expr.bytes.len;
	bool is_bytes = left->const_expr.const_kind == CONST_BYTES;
	Type *indexed = type_get_indexed_type(left->type);
	const char *left_bytes = left->const_expr.bytes.ptr;
RETRY:;
	switch (right->const_expr.const_kind)
	{
		case CONST_INTEGER:
			if (is_bytes)
			{
				// 3. Bytes + (i)char => Bytes
				if (!cast_implicit(context, right, type_char, false)) return false;
				char c = (char) int_to_i64(right->const_expr.ixx);
				return sema_concat_const_bytes(context, expr, left->type, true, left_bytes, &c, len, 1);
			}
			// 4. String + character => String: the integer must be a valid code point.
			if (int_is_neg(right->const_expr.ixx) || int_icomp(right->const_expr.ixx, 0x10FFFF, BINARYOP_GT))
			{
				RETURN_SEMA_ERROR(right, "Cannot concatenate a string with a non-unicode value.");
			}
			return sema_concat_character(context, expr, left->type, left_bytes, len, right->const_expr.ixx.i.low);
		case CONST_FLOAT:
		case CONST_BOOL:
		case CONST_ENUM:
		case CONST_ERR:
		case CONST_POINTER:
		case CONST_TYPEID:
		case CONST_MEMBER:
			RETURN_SEMA_ERROR(expr, "Concatenating %s with %s is not possible at compile time.",
			                  type_quoted_error_string(left->type), type_to_error_string(right->type));
		case CONST_BYTES:
		case CONST_STRING:
			// Cases 1 & 2: plain byte-wise concatenation.
			return sema_concat_const_bytes(context, expr, left->type, is_bytes, left_bytes,
			                               right->const_expr.bytes.ptr,
			                               len, right->const_expr.bytes.len);
		case CONST_UNTYPED_LIST:
			// Give the untyped list the element type, then retry as an initializer.
			if (!cast_implicit(context, right, type_get_inferred_array(indexed), false)) return false;
			goto RETRY;
		case CONST_INITIALIZER:
			// Contract the array/vector into bytes/string form, then retry.
			if (!cast_implicit(context, right, type_get_inferred_array(indexed), false)) return false;
			expr_contract_array(&right->const_expr, left->const_expr.const_kind);
			goto RETRY;
	}
	UNREACHABLE
}
/**
 * Append a single char (is_append = true) or concatenate another bytes/string
 * constant (is_append = false) onto a constant bytes/string 'list', writing
 * the folded constant into 'expr'. The result keeps CONST_BYTES/CONST_STRING
 * kind of 'list'; bytes get a concrete char-array type of the combined length.
 */
static bool sema_append_concat_const_bytes(SemaContext *context, Expr *expr, Expr *list, Expr *element, bool is_append)
{
	Type *indexed = type_get_indexed_type(list->type);
	assert(indexed && "This should always work");
	// For a single-char append the element must first coerce to the element type.
	if (is_append && !cast_implicit(context, element, indexed, false)) return false;
	size_t str_len = list->const_expr.bytes.len;
	size_t element_len = is_append ? 1 : element->const_expr.bytes.len;
	bool is_bytes = list->const_expr.const_kind == CONST_BYTES;
	// Room for both parts plus a trailing NUL so the data doubles as a C string.
	char *data = malloc_arena(str_len + element_len + 1);
	char *current = data;
	if (str_len) memcpy(current, list->const_expr.bytes.ptr, str_len);
	current += str_len;
	if (is_append)
	{
		// Element is an integer constant at this point; take its low byte.
		current[0] = (unsigned char)element->const_expr.ixx.i.low;
	}
	else
	{
		if (element_len) memcpy(current, element->const_expr.bytes.ptr, element_len);
	}
	current += element_len;
	current[0] = '\0';
	expr->expr_kind = EXPR_CONST;
	expr->const_expr = (ExprConst) {
		.const_kind = list->const_expr.const_kind,
		.bytes.ptr = data,
		.bytes.len = str_len + element_len
	};
	expr->resolve_status = RESOLVE_DONE;
	expr->type = is_bytes ? type_get_array(indexed, str_len + element_len) : list->type;
	return true;
}
// Fold appending a single constant 'element' to the constant 'list',
// dispatching on the list's constant kind: typed initializers and empty
// slices grow as arrays, bytes/strings grow byte-wise, untyped lists grow
// in place and stay untyped. The result is written into 'append_expr'.
static inline bool sema_expr_const_append(SemaContext *context, Expr *append_expr, Expr *list, Expr *element)
{
	switch (list->const_expr.const_kind)
	{
		case CONST_INITIALIZER:
			assert(list->type != type_untypedlist);
			return sema_append_const_array_one(context, append_expr, list, element);
		case CONST_UNTYPED_LIST:
		{
			// An untyped list simply grows by one element and stays untyped.
			Expr **elements = list->const_expr.untyped_list;
			vec_add(elements, element);
			append_expr->expr_kind = EXPR_CONST;
			append_expr->const_expr = (ExprConst) { .untyped_list = elements, .const_kind = CONST_UNTYPED_LIST };
			append_expr->type = type_untypedlist;
			append_expr->resolve_status = RESOLVE_DONE;
			return true;
		}
		case CONST_POINTER:
			// A constant null slice behaves as an empty array.
			if (list->type->canonical->type_kind == TYPE_SLICE)
			{
				return sema_append_const_array_one(context, append_expr, list, element);
			}
			FALLTHROUGH;
		case CONST_BYTES:
		case CONST_STRING:
			return sema_append_concat_const_bytes(context, append_expr, list, element, true);
		default:
			RETURN_SEMA_ERROR(list, "Expected some kind of list or vector here.");
	}
}
/**
 * Analyse a compile-time '+++' concatenation. The following valid cases exist:
 *
 * 1. String/Bytes + ... => String/Bytes
 * 2. Vector/slice/array + Untyped list => Merged untyped list
 * 3. Vector/slice/array + arraylike => vector/array iff canonical match, otherwise Untyped list
 * 4. Untyped list + Vector/array/slice => Merged untyped list
 * 5. Vector/array/slice + element => Vector/array/slice + 1 len iff canonical match, Untyped list otherwise
 * 6. Untyped list + element => Untyped list
 */
static inline bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr *left, Expr *right)
{
	assert(concat_expr->resolve_status == RESOLVE_RUNNING);
	ArraySize len = 0;
	bool use_array = true;
	Type *indexed_type = NULL;
	// Both operands must fold to compile-time constants.
	if (!sema_analyse_expr(context, left)) return false;
	if (!sema_cast_const(left)) RETURN_SEMA_ERROR(left, "Expected this to evaluate to a constant value.");
	if (!sema_analyse_expr(context, right)) return false;
	// Fix: report the failure on the right operand, not the left.
	if (!sema_cast_const(right)) RETURN_SEMA_ERROR(right, "Expected this to evaluate to a constant value.");
	Type *element_type = left->type->canonical;
	Type *right_type = right->type->canonical;
	// First classify the left side, collecting its length and element type.
	switch (left->const_expr.const_kind)
	{
		case CONST_POINTER:
			// A constant null slice counts as a zero-length array.
			if (element_type->type_kind == TYPE_SLICE)
			{
				len = 0;
				indexed_type = type_get_indexed_type(element_type);
				break;
			}
			FALLTHROUGH;
		case CONST_FLOAT:
		case CONST_INTEGER:
		case CONST_BOOL:
		case CONST_ENUM:
		case CONST_ERR:
		case CONST_TYPEID:
			RETURN_SEMA_ERROR(left, "Only bytes, strings and list-like constants can be concatenated.");
		case CONST_BYTES:
		case CONST_STRING:
			// Case 1: string/bytes concatenation has a dedicated path.
			return sema_concat_bytes_and_other(context, concat_expr, left, right);
		case CONST_INITIALIZER:
			switch (type_flatten(element_type)->type_kind)
			{
				case TYPE_VECTOR:
				case TYPE_INFERRED_VECTOR:
					use_array = false;
					FALLTHROUGH;
				case TYPE_SLICE:
				case TYPE_ARRAY:
				case TYPE_INFERRED_ARRAY:
				{
					switch (left->const_expr.initializer->kind)
					{
						case CONST_INIT_ARRAY_FULL:
							break;
						case CONST_INIT_ZERO:
							// A zeroed slice is an empty array and may be concatenated.
							if (type_flatten(element_type)->type_kind == TYPE_SLICE) break;
							FALLTHROUGH;
						default:
							RETURN_SEMA_ERROR(left, "Only fully initialized arrays may be concatenated.");
					}
					indexed_type = type_get_indexed_type(element_type);
					assert(indexed_type);
					len = sema_get_const_len(context, left);
					break;
				}
				case TYPE_UNTYPED_LIST:
					UNREACHABLE
				default:
					RETURN_SEMA_ERROR(left, "Only bytes, strings and array-like constants can be concatenated.");
			}
			break;
		case CONST_UNTYPED_LIST:
			len = vec_size(left->const_expr.untyped_list);
			break;
		case CONST_MEMBER:
			RETURN_SEMA_ERROR(left, "This can't be concatenated.");
	}
	// Now classify the right side against the left's element type.
	switch (right->const_expr.const_kind)
	{
		case CONST_FLOAT:
		case CONST_INTEGER:
		case CONST_BOOL:
		case CONST_ENUM:
		case CONST_ERR:
		case CONST_POINTER:
		case CONST_TYPEID:
		case CONST_MEMBER:
			// Cases 5 & 6: appending a single element.
			return sema_expr_const_append(context, concat_expr, left, right);
		case CONST_BYTES:
		case CONST_STRING:
			if (left->type == type_untypedlist || indexed_type == right_type) return sema_expr_const_append(context, concat_expr, left, right);
			if (!type_is_integer(indexed_type) || type_size(indexed_type) != 1)
			{
				RETURN_SEMA_ERROR(right, "You can't concatenate %s and %s.", type_quoted_error_string(left->type),
				                  type_to_error_string(right_type));
			}
			// The left array of 1-byte integers is contracted to bytes and
			// rejoined through the bytes path.
			if (!cast_implicit(context, right, type_get_inferred_array(indexed_type), false)) return false;
			expr_contract_array(&left->const_expr, CONST_BYTES);
			return sema_concat_bytes_and_other(context, concat_expr, left, right);
		case CONST_UNTYPED_LIST:
			break;
		case CONST_INITIALIZER:
			// If the right side is a single element of the array's element type, append it.
			if (indexed_type && cast_implicit_silent(context, right, indexed_type, false))
			{
				return sema_expr_const_append(context, concat_expr, left, right);
			}
			break;
	}
	// If the right side doesn't take the left's element type, fall back to an untyped list.
	if (indexed_type && !cast_implicit_silent(context, right, type_get_inferred_array(indexed_type), false))
	{
		indexed_type = NULL;
	}
	len += sema_get_const_len(context, right);
	if (!indexed_type)
	{
		// Cases 2, 3 (mismatch) and 4: merge both sides into one untyped list.
		Expr **untyped_exprs = VECNEW(Expr*, len + 1);
		Expr *exprs[2] = { left, right };
		for (unsigned i = 0; i < 2; i++)
		{
			Expr *single_expr = exprs[i];
			if (expr_is_const_untyped_list(single_expr))
			{
				FOREACH(Expr *, expr_untyped, single_expr->const_expr.untyped_list)
				{
					vec_add(untyped_exprs, expr_untyped);
				}
				continue;
			}
			ConstInitializer *init = single_expr->const_expr.initializer;
			if (init->kind != CONST_INIT_ARRAY_FULL)
			{
				// A zeroed untyped list contributes nothing.
				if (init->kind == CONST_INIT_ZERO && init->type == type_untypedlist) continue;
				RETURN_SEMA_ERROR(single_expr, "Expected a full array here.");
			}
			FOREACH(ConstInitializer *, val, init->init_array_full)
			{
				vec_add(untyped_exprs, val->init_value);
			}
		}
		concat_expr->expr_kind = EXPR_CONST;
		concat_expr->type = type_untypedlist;
		concat_expr->resolve_status = RESOLVE_DONE;
		concat_expr->const_expr = (ExprConst) {
			.const_kind = CONST_UNTYPED_LIST,
			.untyped_list = untyped_exprs
		};
		return true;
	}
	// Both sides share an element type: produce a typed array/vector constant.
	ConstInitializer **inits = VECNEW(ConstInitializer*, len);
	Expr *exprs[2] = { left, right };
	for (int i = 0; i < 2; i++)
	{
		Expr *element = exprs[i];
		assert(element->const_expr.const_kind == CONST_INITIALIZER);
		ConstInitType init_type = element->const_expr.initializer->kind;
		switch (init_type)
		{
			case CONST_INIT_ARRAY_FULL:
				break;
			case CONST_INIT_ZERO:
				// A zeroed slice contributes no elements.
				if (type_flatten(element->type)->type_kind == TYPE_SLICE) continue;
				FALLTHROUGH;
			default:
				RETURN_SEMA_ERROR(element, "Only fully initialized arrays may be concatenated.");
		}
		FOREACH(ConstInitializer *, init, element->const_expr.initializer->init_array_full)
		{
			vec_add(inits, init);
		}
	}
	concat_expr->expr_kind = EXPR_CONST;
	concat_expr->resolve_status = RESOLVE_DONE;
	concat_expr->type = use_array ? type_get_array(indexed_type, len) : type_get_vector(indexed_type, len);
	ConstInitializer *new_init = CALLOCS(ConstInitializer);
	new_init->init_array_full = inits;
	new_init->type = concat_expr->type;
	new_init->kind = CONST_INIT_ARRAY_FULL;
	concat_expr->const_expr = (ExprConst) {
		.const_kind = CONST_INITIALIZER,
		.initializer = new_init
	};
	return true;
}
static inline bool sema_expr_analyse_binary(SemaContext *context, Expr *expr)
{
if (expr->binary_expr.operator == BINARYOP_ELSE) return sema_expr_analyse_or_error(context, expr);
@@ -6894,6 +7400,11 @@ static inline bool sema_expr_analyse_binary(SemaContext *context, Expr *expr)
{
case BINARYOP_ELSE:
UNREACHABLE // Handled previously
case BINARYOP_CT_CONCAT:
return sema_expr_analyse_ct_concat(context, expr, left, right);
case BINARYOP_CT_OR:
case BINARYOP_CT_AND:
return sema_expr_analyse_ct_and_or(context, expr, left, right);
case BINARYOP_ASSIGN:
return sema_expr_analyse_assign(context, expr, left, right);
case BINARYOP_MULT:
@@ -8453,7 +8964,7 @@ static inline bool sema_expr_analyse_castable(SemaContext *context, Expr *expr)
}
static inline bool sema_expr_analyse_ct_and_or(SemaContext *context, Expr *expr)
static inline bool sema_expr_analyse_ct_and_or_fn(SemaContext *context, Expr *expr)
{
assert(expr->resolve_status == RESOLVE_RUNNING);
bool is_and = expr->ct_and_or_expr.is_and;
@@ -8473,15 +8984,6 @@ static inline bool sema_expr_analyse_ct_and_or(SemaContext *context, Expr *expr)
return true;
}
typedef enum ConcatType_
{
CONCAT_UNKNOWN,
CONCAT_JOIN_BYTES,
CONCAT_JOIN_ARRAYS,
CONCAT_JOIN_LISTS,
} ConcatType;
bool sema_concat_join_arrays(SemaContext *context, Expr *expr, Expr **exprs, Type *type, ArraySize len)
{
ConstInitializer **inits = VECNEW(ConstInitializer*, len);
@@ -8517,7 +9019,7 @@ bool sema_concat_join_arrays(SemaContext *context, Expr *expr, Expr **exprs, Typ
return true;
}
bool sema_append_const_array(SemaContext *context, Expr *expr, Expr *list, Expr **exprs)
static bool sema_append_const_array(SemaContext *context, Expr *expr, Expr *list, Expr **exprs)
{
bool is_empty_slice = list->const_expr.const_kind == CONST_POINTER;
if (!is_empty_slice && list->const_expr.initializer->kind != CONST_INIT_ARRAY_FULL)
@@ -8584,7 +9086,7 @@ bool sema_concat_join_bytes(Expr *expr, Expr **exprs, ArraySize len)
return true;
}
static inline bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr)
static inline bool sema_expr_analyse_ct_concatfn(SemaContext *context, Expr *concat_expr)
{
assert(concat_expr->resolve_status == RESOLVE_RUNNING);
if (!sema_expand_vasplat_exprs(context, concat_expr->ct_concat)) return false;
@@ -9000,9 +9502,9 @@ static inline bool sema_analyse_expr_dispatch(SemaContext *context, Expr *expr)
case EXPR_CT_APPEND:
return sema_expr_analyse_ct_append(context, expr);
case EXPR_CT_CONCAT:
return sema_expr_analyse_ct_concat(context, expr);
return sema_expr_analyse_ct_concatfn(context, expr);
case EXPR_CT_AND_OR:
return sema_expr_analyse_ct_and_or(context, expr);
return sema_expr_analyse_ct_and_or_fn(context, expr);
case EXPR_CT_ARG:
return sema_expr_analyse_ct_arg(context, NULL, expr);
case EXPR_STRINGIFY:

View File

@@ -82,6 +82,12 @@ const char *token_type_to_string(TokenType type)
return "^=";
case TOKEN_BUILTIN:
return "$$";
case TOKEN_CT_AND:
return "&&&";
case TOKEN_CT_OR:
return "|||";
case TOKEN_CT_CONCAT:
return "+++";
case TOKEN_DIV_ASSIGN:
return "/=";
case TOKEN_DOTDOT:
@@ -326,7 +332,7 @@ const char *token_type_to_string(TokenType type)
case TOKEN_CT_ALIGNOF:
return "$alignof";
case TOKEN_CT_AND:
case TOKEN_CT_ANDFN:
return "$and";
case TOKEN_CT_APPEND:
return "$append";
@@ -336,7 +342,7 @@ const char *token_type_to_string(TokenType type)
return "$assignable";
case TOKEN_CT_CASE:
return "$case";
case TOKEN_CT_CONCAT:
case TOKEN_CT_CONCATFN:
return "$concat";
case TOKEN_CT_DEFAULT:
return "$default";
@@ -394,7 +400,7 @@ const char *token_type_to_string(TokenType type)
return "$nameof";
case TOKEN_CT_OFFSETOF:
return "$offsetof";
case TOKEN_CT_OR:
case TOKEN_CT_ORFN:
return "$or";
case TOKEN_CT_QNAMEOF:
return "$qnameof";

View File

@@ -68,7 +68,7 @@
#if (defined(__GNUC__) && __GNUC__ >= 7) || defined(__clang__)
#define PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
#define FALLTHROUGH __attribute__ ((fallthrough))
#define FALLTHROUGH (void)1; __attribute__ ((fallthrough))
#define UNUSED __attribute__((unused))
#define NORETURN __attribute__((noreturn))
#define INLINE __attribute__((always_inline)) static inline

View File

@@ -2,16 +2,16 @@
module test;
macro foo()
{
var c = $concat("hello", " world");
String[*] a = $concat(String[1] { "hello" }, String[1] { " world" });
var c = "hello" +++ " world";
String[*] a = String[1] { "hello" } +++ String[1] { " world" };
int[2] $a = { 1, 2 };
$a = $append($a, 100);
$a = $a +++ 100;
int z = $typeof($a).len;
var b = $a;
var d = $append(int[]{}, 1, 2, 3);
var e = $append(String[1] { "hello... " }, " there!");
var f = $concat("", "");
var g = $concat("bye");
var d = int[]{} +++ 1 +++ 2 +++ 3;
var e = String[1] { "hello... " } +++ " there!";
var f = "" +++ "";
var g = "bye";
var h = $$str_hash("helloworld");
}
fn int main()

View File

@@ -0,0 +1,25 @@
// Compile-time tests for the '+++' constant concatenation operator:
// array +++ array, string/bytes +++ string/char/int, and untyped-list
// operands on either side. All values fold at compile time ($-variables).
fn void main(String[] args)
{
	// Typed array joins.
	var $x = int[2]{ 1, 2 } +++ int[2]{ 4, 5 };
	// String + string + characters.
	var $v = "foo" +++ "baz" +++ '!' +++ '?';
	// Bytes + string / integers / char arrays.
	var $b = x'403322' +++ "baz";
	var $b2 = x'40334a' +++ 55 +++ 55;
	var $b3 = x'403322' +++ char[2] { 1, 2 };
	var $b4 = x'403322' +++ char[2] { 1, 2 };
	var $b5 = "foo" +++ { 55, 57 };
	var $b6 = ((ichar[3])x'403322') +++ ichar[2] { 1, 2 };
	var $b7 = char[2] { 1, 2 } +++ "foo";
	assert($b7 == { 1, 2, 102, 111, 111});
	assert($b5 == "foo79");
	assert($b3 == $b4);
	assert($b6 == { 0x40, 0x33, 0x22, 1, 2 });
	// Untyped-list operands coerce to the declared array type.
	int[4] $x2 = int[2]{ 1, 2 }+++ int[2]{ 4, 5 };
	int[4] $y = { 1, 2 } +++ { 4, 5 };
	assert($x == {1, 2, 4, 5});
	assert($x2 == {1, 2, 4, 5});
	assert($y == {1, 2, 4, 5});
	assert($v == "foobaz!?");
	assert($b == {64, 51, 34, 98, 97, 122});
	assert($b2 == {64, 51, 74, 55, 55});
	assert($b3 == {64, 51, 34, 1, 2});
}

View File

@@ -10,3 +10,5 @@ fn void foo4($Type) { } // #error: Only regular parameters are allowed for funct
fn void foo8(int* &foo) {} // #error: Only regular parameters are allowed for functions.
fn void foo9(int x, int x) {} // #error: Duplicate parameter name 'x'.
macro @foo($a, $b, $c, ...) {}