Initial checkin of docs parsing. char is now unsigned, and signed char is "ichar".

This commit is contained in:
Christoffer Lerno
2021-01-29 18:25:15 +01:00
committed by Christoffer Lerno
parent 45c4f205bb
commit 4f064e7da2
57 changed files with 1105 additions and 497 deletions

View File

@@ -18,9 +18,9 @@ const char[64] LUT_ENC = {
'4', '5', '6', '7', '8', '9', '+', '/',
};
const byte ERR = 0xFF;
const char ERR = 0xFF;
const byte[256] LUT_DEC =
const char[256] LUT_DEC =
{
[0..255] = ERR,
['A'] = 0, ['B'] = 1, ['C'] = 2, ['D'] = 3, ['E'] = 4,
@@ -43,7 +43,7 @@ const char PAD = '=';
const char FIRST = '+';
const char LAST = 'z';
public func void encode(byte[] in, char *out)
public func void encode(char[] in, char *out)
{
int j = 0;
char c = LUT_ENC[1];
@@ -77,7 +77,7 @@ public func void encode(byte[] in, char *out)
}
public func int! decode(char[] in, byte* out)
public func int! decode(char[] in, char* out)
{
int j = 0;
@@ -86,7 +86,7 @@ public func int! decode(char[] in, byte* out)
char value = in[i];
if (value == PAD) return j;
byte c = LUT_DEC[in[i]];
char c = LUT_DEC[in[i]];
if (c == ERR) return InvalidCharacter({i, value})!;
switch (i % 4)
@@ -120,9 +120,9 @@ public func void main()
{
char *helloworld = "Hello World\n";
char[1000] buffer;
encode(cast(helloworld as byte*)[0..12], &buffer);
encode(helloworld[0..12], &buffer);
printf("Result: %s\n", &buffer);
char *to_decode = "aGVsbG8gd29ybGRcMA==";
decode(to_decode[0..19], cast(&buffer as byte*));
decode(to_decode[0..19], &buffer);
printf("Result: %s\n", &buffer);
}

View File

@@ -13,15 +13,15 @@ struct GameBoard
{
int h;
int w;
byte* world;
byte* temp;
char* world;
char* temp;
}
func void GameBoard.show(GameBoard *board)
{
printf("\e[H");
byte* current = board.world;
char* current = board.world;
for (int y = 0; y < board.h; y++)
{
for (int x = 0; x < board.w; x++)
@@ -51,7 +51,7 @@ func void GameBoard.evolve(GameBoard *board)
}
}
if (board.world[x + y * board.w]) n--;
board.temp[x + y * board.w] = cast(n == 3 || (n == 2 && board.world[x + y * board.w]) as byte);
board.temp[x + y * board.w] = cast(n == 3 || (n == 2 && board.world[x + y * board.w]) as char);
}
}
for (int i = 0; i < board.w * board.h; i++)
@@ -61,7 +61,7 @@ func void GameBoard.evolve(GameBoard *board)
}
func int main(int c as char** v)
func int main(int c, char** v)
{
int w = 0;
int h = 0;

291
resources/examples/hash.c3 Normal file
View File

@@ -0,0 +1,291 @@
module hash;
// Code adapted from Odin's hash.odin
// The code below should not be considered *correct*
// They are merely done to illustrate the language syntax.
extern func void printf(char*, ...);
// Demo entry point: run every hash in this module over the same
// 12-character string and print the results.
// Note: C3 slice ranges are inclusive, so y[0..11] covers all 12 chars.
// 64-bit results are printed with %llx, 32-bit ones with %x.
public func void main()
{
char* y = "Hello World!";
printf("Adler32 of %s is %x\n", y, adler32(y[0..11]));
printf("CRC32 of %s is %x\n", y, crc32(y[0..11]));
printf("CRC64 of %s is %llx\n", y, crc64(y[0..11]));
printf("FNV32 of %s is %x\n", y, fnv32(y[0..11]));
printf("FNV32a of %s is %x\n", y, fnv32a(y[0..11]));
printf("FNV64 of %s is %llx\n", y, fnv64(y[0..11]));
printf("FNV64a of %s is %llx\n", y, fnv64a(y[0..11]));
}
/**
 * Compute the Adler-32 checksum of a byte slice.
 * The low 16 bits hold the running byte sum (seeded with 1), the high
 * 16 bits hold the sum-of-sums; both are reduced modulo 65521.
 */
public func uint adler32(char[] data)
{
    // Largest prime below 2^16, as used by the Adler-32 definition.
    const uint ADLER_CONST = 65521;
    uint low = 1;
    uint high = 0;
    foreach (char c : data)
    {
        low = (low + c) % ADLER_CONST;
        high = (high + low) % ADLER_CONST;
    }
    return (high << 16) | low;
}
/**
 * Compute the CRC-32 of a byte slice using the precomputed
 * byte-at-a-time lookup table CRC32_TABLE.
 */
public func uint crc32(char[] data)
{
// Start with all bits set, per the standard reflected CRC-32 algorithm.
uint result = ~cast(0 as uint);
foreach (char x : data)
{
// Table-driven update: the low byte of (result ^ x) indexes the table.
result = (result >> 8) ^ CRC32_TABLE[(result ^ x) & 0xFF];
}
// Final bit inversion completes the CRC.
return ~result;
}
/**
 * Compute a 64-bit CRC of a byte slice via CRC64_TABLE.
 * NOTE(review): unlike crc32 above, this variant starts from 0 and has
 * no final inversion — matches the adapted Odin code, but worth
 * confirming against the intended CRC-64 specification.
 */
public func ulong crc64(char[] data)
{
ulong result = 0;
foreach (char x : data)
{
// The cast truncates the xor result to one byte for the table index.
result = (result >> 8) ^ CRC64_TABLE[cast(result ^ x as char)];
}
return result;
}
/**
 * 32-bit FNV-1 hash: for each byte, multiply by the FNV prime
 * (with wrapping) and then xor in the byte.
 */
public func uint fnv32(char[] data)
{
    // 32-bit FNV offset basis.
    uint result = 0x811c9dc5;
    foreach (char c : data)
    {
        // *% is C3's wrapping multiply; 0x01000193 is the 32-bit FNV prime.
        result = (result *% 0x01000193) ^ c;
    }
    return result;
}
/**
 * 64-bit FNV-1 hash: for each byte, multiply by the FNV prime
 * (with wrapping) and then xor in the byte.
 */
public func ulong fnv64(char[] data)
{
    // 64-bit FNV offset basis.
    ulong result = 0xcbf29ce484222325;
    foreach (char c : data)
    {
        // 0x100000001b3 is the 64-bit FNV prime.
        result = (result *% 0x100000001b3) ^ c;
    }
    return result;
}
/**
 * 32-bit FNV-1a hash: like fnv32 but with xor applied before the
 * wrapping multiply, which gives better avalanche behavior.
 */
public func uint fnv32a(char[] data)
{
    // 32-bit FNV offset basis.
    uint result = 0x811c9dc5;
    foreach (char c : data)
    {
        result = (result ^ c) *% 0x01000193;
    }
    return result;
}
/**
 * 64-bit FNV-1a hash: like fnv64 but with xor applied before the
 * wrapping multiply.
 */
public func ulong fnv64a(char[] data)
{
    // 64-bit FNV offset basis.
    ulong result = 0xcbf29ce484222325;
    foreach (char c : data)
    {
        result = (result ^ c) *% 0x100000001b3;
    }
    return result;
}
const uint[256] CRC32_TABLE = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
const ulong[256] CRC64_TABLE = {
0x0000000000000000, 0x7ad870c830358979,
0xf5b0e190606b12f2, 0x8f689158505e9b8b,
0xc038e5739841b68f, 0xbae095bba8743ff6,
0x358804e3f82aa47d, 0x4f50742bc81f2d04,
0xab28ecb46814fe75, 0xd1f09c7c5821770c,
0x5e980d24087fec87, 0x24407dec384a65fe,
0x6b1009c7f05548fa, 0x11c8790fc060c183,
0x9ea0e857903e5a08, 0xe478989fa00bd371,
0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8,
0x88b81eabe8d57d73, 0xf2606e63d8e0f40a,
0xbd301a4810ffd90e, 0xc7e86a8020ca5077,
0x4880fbd87094cbfc, 0x32588b1040a14285,
0xd620138fe0aa91f4, 0xacf86347d09f188d,
0x2390f21f80c18306, 0x594882d7b0f40a7f,
0x1618f6fc78eb277b, 0x6cc0863448deae02,
0xe3a8176c18803589, 0x997067a428b5bcf0,
0xfa11fe77117cdf02, 0x80c98ebf2149567b,
0x0fa11fe77117cdf0, 0x75796f2f41224489,
0x3a291b04893d698d, 0x40f16bccb908e0f4,
0xcf99fa94e9567b7f, 0xb5418a5cd963f206,
0x513912c379682177, 0x2be1620b495da80e,
0xa489f35319033385, 0xde51839b2936bafc,
0x9101f7b0e12997f8, 0xebd98778d11c1e81,
0x64b116208142850a, 0x1e6966e8b1770c73,
0x8719014c99c2b083, 0xfdc17184a9f739fa,
0x72a9e0dcf9a9a271, 0x08719014c99c2b08,
0x4721e43f0183060c, 0x3df994f731b68f75,
0xb29105af61e814fe, 0xc849756751dd9d87,
0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f,
0xd9810c6891bd5c04, 0xa3597ca0a188d57d,
0xec09088b6997f879, 0x96d1784359a27100,
0x19b9e91b09fcea8b, 0x636199d339c963f2,
0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416,
0x2aca3b2d1a053f9d, 0x50124be52a30b6e4,
0x1f423fcee22f9be0, 0x659a4f06d21a1299,
0xeaf2de5e82448912, 0x902aae96b271006b,
0x74523609127ad31a, 0x0e8a46c1224f5a63,
0x81e2d7997211c1e8, 0xfb3aa75142244891,
0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec,
0x41da32eaea507767, 0x3b024222da65fe1e,
0xa2722586f2d042ee, 0xd8aa554ec2e5cb97,
0x57c2c41692bb501c, 0x2d1ab4dea28ed965,
0x624ac0f56a91f461, 0x1892b03d5aa47d18,
0x97fa21650afae693, 0xed2251ad3acf6fea,
0x095ac9329ac4bc9b, 0x7382b9faaaf135e2,
0xfcea28a2faafae69, 0x8632586aca9a2710,
0xc9622c4102850a14, 0xb3ba5c8932b0836d,
0x3cd2cdd162ee18e6, 0x460abd1952db919f,
0x256b24ca6b12f26d, 0x5fb354025b277b14,
0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6,
0xe553c1b9f35344e2, 0x9f8bb171c366cd9b,
0x10e3202993385610, 0x6a3b50e1a30ddf69,
0x8e43c87e03060c18, 0xf49bb8b633338561,
0x7bf329ee636d1eea, 0x012b592653589793,
0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee,
0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c,
0x5863dbf1e3ac9dec, 0x22bbab39d3991495,
0xadd33a6183c78f1e, 0xd70b4aa9b3f20667,
0x985b3e827bed2b63, 0xe2834e4a4bd8a21a,
0x6debdf121b863991, 0x1733afda2bb3b0e8,
0xf34b37458bb86399, 0x8993478dbb8deae0,
0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812,
0x3373d23613f9d516, 0x49aba2fe23cc5c6f,
0xc6c333a67392c7e4, 0xbc1b436e43a74e9d,
0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc,
0x601c72b9cc20db47, 0x1ac40271fc15523e,
0x5594765a340a7f3a, 0x2f4c0692043ff643,
0xa02497ca54616dc8, 0xdafce7026454e4b1,
0x3e847f9dc45f37c0, 0x445c0f55f46abeb9,
0xcb349e0da4342532, 0xb1eceec59401ac4b,
0xfebc9aee5c1e814f, 0x8464ea266c2b0836,
0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4,
0xe8a46c1224f5a634, 0x927c1cda14c02f4d,
0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf,
0x289c8961bcb410bb, 0x5244f9a98c8199c2,
0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30,
0x438c80a64ce15841, 0x3954f06e7cd4d138,
0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca,
0x83b465d5d4a0eece, 0xf96c151de49567b7,
0x76048445b4cbfc3c, 0x0cdcf48d84fe7545,
0x6fbd6d5ebd3716b7, 0x15651d968d029fce,
0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c,
0xaf85882d2576a038, 0xd55df8e515432941,
0x5a3569bd451db2ca, 0x20ed197575283bb3,
0xc49581ead523e8c2, 0xbe4df122e51661bb,
0x3125607ab548fa30, 0x4bfd10b2857d7349,
0x04ad64994d625e4d, 0x7e7514517d57d734,
0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6,
0x12b5926535897936, 0x686de2ad05bcf04f,
0xe70573f555e26bc4, 0x9ddd033d65d7e2bd,
0xd28d7716adc8cfb9, 0xa85507de9dfd46c0,
0x273d9686cda3dd4b, 0x5de5e64efd965432,
0xb99d7ed15d9d8743, 0xc3450e196da80e3a,
0x4c2d9f413df695b1, 0x36f5ef890dc31cc8,
0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5,
0x8c157a32a5b7233e, 0xf6cd0afa9582aa47,
0x4ad64994d625e4da, 0x300e395ce6106da3,
0xbf66a804b64ef628, 0xc5bed8cc867b7f51,
0x8aeeace74e645255, 0xf036dc2f7e51db2c,
0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de,
0xe1fea520be311aaf, 0x9b26d5e88e0493d6,
0x144e44b0de5a085d, 0x6e963478ee6f8124,
0x21c640532670ac20, 0x5b1e309b16452559,
0xd476a1c3461bbed2, 0xaeaed10b762e37ab,
0x37deb6af5e9b8b5b, 0x4d06c6676eae0222,
0xc26e573f3ef099a9, 0xb8b627f70ec510d0,
0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad,
0x0256b24ca6b12f26, 0x788ec2849684a65f,
0x9cf65a1b368f752e, 0xe62e2ad306bafc57,
0x6946bb8b56e467dc, 0x139ecb4366d1eea5,
0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8,
0xa97e5ef8cea5d153, 0xd3a62e30fe90582a,
0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1,
0x45775673a732292a, 0x3faf26bb9707a053,
0x70ff52905f188d57, 0x0a2722586f2d042e,
0x854fb3003f739fa5, 0xff97c3c80f4616dc,
0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4,
0xee5fbac7cf26d75f, 0x9487ca0fff135e26,
0xdbd7be24370c7322, 0xa10fceec0739fa5b,
0x2e675fb4576761d0, 0x54bf2f7c6752e8a9,
0xcdcf48d84fe75459, 0xb71738107fd2dd20,
0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2,
0x0df7adabd7a6e2d6, 0x772fdd63e7936baf,
0xf8474c3bb7cdf024, 0x829f3cf387f8795d,
0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355,
0x935745fc4798b8de, 0xe98f353477ad31a7,
0xa6df411fbfb21ca3, 0xdc0731d78f8795da,
0x536fa08fdfd90e51, 0x29b7d047efec8728,
};

View File

@@ -1,213 +0,0 @@
module hash;
// Code adapted from Odin's hash.odin
// The code below should not be considered *correct*
// They are merely done to illustrate the language syntax.
public func uint adler32(byte[] data)
{
const uint ADLER_CONST = 65521;
uint a = 1;
uint b = 0;
for (byte x : data)
{
a = (a + x) % ADLER_CONST;
b = (b + a) % ADLER_CONST;
}
return (b << 16) | a;
}
public func uint crc32(byte[] data)
{
uint result = ~cast(uint as 0);
for (byte x : data)
{
result = (result >> 8) ^ CRC32_TABLE[result ^ x) & 255];
}
return ~result;
}
public func uint crc64(byte[*] data)
{
ulong result = ~cast(ulong as 0);
for (byte x : data)
{
result = (result >> 8) ^ CRC64_TABLE[(result ^ x) & 255];
}
return ~result;
}
public func uint fnv32(byte[*] data)
{
uint h = 0x811c9dc5;
for (byte x : data)
{
h = (h *% 0x01000193) ^ x;
}
return h;
}
public func ulong fnv64(byte[] data)
{
ulong h = 0xcbf29ce484222325;
for (byte x : data)
{
h = (h *% 0x100000001b3) ^ x;
}
return h;
}
public func uint fnv32a(byte[] data)
{
uint h = 0x811c9dc5;
for (byte x : data)
{
h = (h ^ b) *% 0x01000193;
}
return h;
}
public func ulong fnv32a(byte[] data)
{
ulong h = 0xcbf29ce484222325;
for (byte x in data)
{
h = (h ^ b) *% 0x100000001b3;
}
return h;
}
const uint[256] CRC32_TABLE = {
0x00000000 as 0x77073096 as 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
const ulong[256] CRC64_TABLE = {
0x0000000000000000, 0x42f0e1eba9ea3693, 0x85e1c3d753d46d26, 0xc711223cfa3e5bb5,
0x493366450e42ecdf, 0x0bc387aea7a8da4c, 0xccd2a5925d9681f9, 0x8e224479f47cb76a,
0x9266cc8a1c85d9be, 0xd0962d61b56fef2d, 0x17870f5d4f51b498, 0x5577eeb6e6bb820b,
0xdb55aacf12c73561, 0x99a54b24bb2d03f2, 0x5eb4691841135847, 0x1c4488f3e8f96ed4,
0x663d78ff90e185ef, 0x24cd9914390bb37c, 0xe3dcbb28c335e8c9, 0xa12c5ac36adfde5a,
0x2f0e1eba9ea36930, 0x6dfeff5137495fa3, 0xaaefdd6dcd770416, 0xe81f3c86649d3285,
0xf45bb4758c645c51, 0xb6ab559e258e6ac2, 0x71ba77a2dfb03177, 0x334a9649765a07e4,
0xbd68d2308226b08e, 0xff9833db2bcc861d, 0x388911e7d1f2dda8, 0x7a79f00c7818eb3b,
0xcc7af1ff21c30bde, 0x8e8a101488293d4d, 0x499b3228721766f8, 0x0b6bd3c3dbfd506b,
0x854997ba2f81e701, 0xc7b97651866bd192, 0x00a8546d7c558a27, 0x4258b586d5bfbcb4,
0x5e1c3d753d46d260, 0x1cecdc9e94ace4f3, 0xdbfdfea26e92bf46, 0x990d1f49c77889d5,
0x172f5b3033043ebf, 0x55dfbadb9aee082c, 0x92ce98e760d05399, 0xd03e790cc93a650a,
0xaa478900b1228e31, 0xe8b768eb18c8b8a2, 0x2fa64ad7e2f6e317, 0x6d56ab3c4b1cd584,
0xe374ef45bf6062ee, 0xa1840eae168a547d, 0x66952c92ecb40fc8, 0x2465cd79455e395b,
0x3821458aada7578f, 0x7ad1a461044d611c, 0xbdc0865dfe733aa9, 0xff3067b657990c3a,
0x711223cfa3e5bb50, 0x33e2c2240a0f8dc3, 0xf4f3e018f031d676, 0xb60301f359dbe0e5,
0xda050215ea6c212f, 0x98f5e3fe438617bc, 0x5fe4c1c2b9b84c09, 0x1d14202910527a9a,
0x93366450e42ecdf0, 0xd1c685bb4dc4fb63, 0x16d7a787b7faa0d6, 0x5427466c1e109645,
0x4863ce9ff6e9f891, 0x0a932f745f03ce02, 0xcd820d48a53d95b7, 0x8f72eca30cd7a324,
0x0150a8daf8ab144e, 0x43a04931514122dd, 0x84b16b0dab7f7968, 0xc6418ae602954ffb,
0xbc387aea7a8da4c0, 0xfec89b01d3679253, 0x39d9b93d2959c9e6, 0x7b2958d680b3ff75,
0xf50b1caf74cf481f, 0xb7fbfd44dd257e8c, 0x70eadf78271b2539, 0x321a3e938ef113aa,
0x2e5eb66066087d7e, 0x6cae578bcfe24bed, 0xabbf75b735dc1058, 0xe94f945c9c3626cb,
0x676dd025684a91a1, 0x259d31cec1a0a732, 0xe28c13f23b9efc87, 0xa07cf2199274ca14,
0x167ff3eacbaf2af1, 0x548f120162451c62, 0x939e303d987b47d7, 0xd16ed1d631917144,
0x5f4c95afc5edc62e, 0x1dbc74446c07f0bd, 0xdaad56789639ab08, 0x985db7933fd39d9b,
0x84193f60d72af34f, 0xc6e9de8b7ec0c5dc, 0x01f8fcb784fe9e69, 0x43081d5c2d14a8fa,
0xcd2a5925d9681f90, 0x8fdab8ce70822903, 0x48cb9af28abc72b6, 0x0a3b7b1923564425,
0x70428b155b4eaf1e, 0x32b26afef2a4998d, 0xf5a348c2089ac238, 0xb753a929a170f4ab,
0x3971ed50550c43c1, 0x7b810cbbfce67552, 0xbc902e8706d82ee7, 0xfe60cf6caf321874,
0xe224479f47cb76a0, 0xa0d4a674ee214033, 0x67c58448141f1b86, 0x253565a3bdf52d15,
0xab1721da49899a7f, 0xe9e7c031e063acec, 0x2ef6e20d1a5df759, 0x6c0603e6b3b7c1ca,
0xf6fae5c07d3274cd, 0xb40a042bd4d8425e, 0x731b26172ee619eb, 0x31ebc7fc870c2f78,
0xbfc9838573709812, 0xfd39626eda9aae81, 0x3a28405220a4f534, 0x78d8a1b9894ec3a7,
0x649c294a61b7ad73, 0x266cc8a1c85d9be0, 0xe17dea9d3263c055, 0xa38d0b769b89f6c6,
0x2daf4f0f6ff541ac, 0x6f5faee4c61f773f, 0xa84e8cd83c212c8a, 0xeabe6d3395cb1a19,
0x90c79d3fedd3f122, 0xd2377cd44439c7b1, 0x15265ee8be079c04, 0x57d6bf0317edaa97,
0xd9f4fb7ae3911dfd, 0x9b041a914a7b2b6e, 0x5c1538adb04570db, 0x1ee5d94619af4648,
0x02a151b5f156289c, 0x4051b05e58bc1e0f, 0x87409262a28245ba, 0xc5b073890b687329,
0x4b9237f0ff14c443, 0x0962d61b56fef2d0, 0xce73f427acc0a965, 0x8c8315cc052a9ff6,
0x3a80143f5cf17f13, 0x7870f5d4f51b4980, 0xbf61d7e80f251235, 0xfd913603a6cf24a6,
0x73b3727a52b393cc, 0x31439391fb59a55f, 0xf652b1ad0167feea, 0xb4a25046a88dc879,
0xa8e6d8b54074a6ad, 0xea16395ee99e903e, 0x2d071b6213a0cb8b, 0x6ff7fa89ba4afd18,
0xe1d5bef04e364a72, 0xa3255f1be7dc7ce1, 0x64347d271de22754, 0x26c49cccb40811c7,
0x5cbd6cc0cc10fafc, 0x1e4d8d2b65facc6f, 0xd95caf179fc497da, 0x9bac4efc362ea149,
0x158e0a85c2521623, 0x577eeb6e6bb820b0, 0x906fc95291867b05, 0xd29f28b9386c4d96,
0xcedba04ad0952342, 0x8c2b41a1797f15d1, 0x4b3a639d83414e64, 0x09ca82762aab78f7,
0x87e8c60fded7cf9d, 0xc51827e4773df90e, 0x020905d88d03a2bb, 0x40f9e43324e99428,
0x2cffe7d5975e55e2, 0x6e0f063e3eb46371, 0xa91e2402c48a38c4, 0xebeec5e96d600e57,
0x65cc8190991cb93d, 0x273c607b30f68fae, 0xe02d4247cac8d41b, 0xa2dda3ac6322e288,
0xbe992b5f8bdb8c5c, 0xfc69cab42231bacf, 0x3b78e888d80fe17a, 0x7988096371e5d7e9,
0xf7aa4d1a85996083, 0xb55aacf12c735610, 0x724b8ecdd64d0da5, 0x30bb6f267fa73b36,
0x4ac29f2a07bfd00d, 0x08327ec1ae55e69e, 0xcf235cfd546bbd2b, 0x8dd3bd16fd818bb8,
0x03f1f96f09fd3cd2, 0x41011884a0170a41, 0x86103ab85a2951f4, 0xc4e0db53f3c36767,
0xd8a453a01b3a09b3, 0x9a54b24bb2d03f20, 0x5d45907748ee6495, 0x1fb5719ce1045206,
0x919735e51578e56c, 0xd367d40ebc92d3ff, 0x1476f63246ac884a, 0x568617d9ef46bed9,
0xe085162ab69d5e3c, 0xa275f7c11f7768af, 0x6564d5fde549331a, 0x279434164ca30589,
0xa9b6706fb8dfb2e3, 0xeb46918411358470, 0x2c57b3b8eb0bdfc5, 0x6ea7525342e1e956,
0x72e3daa0aa188782, 0x30133b4b03f2b111, 0xf7021977f9cceaa4, 0xb5f2f89c5026dc37,
0x3bd0bce5a45a6b5d, 0x79205d0e0db05dce, 0xbe317f32f78e067b, 0xfcc19ed95e6430e8,
0x86b86ed5267cdbd3, 0xc4488f3e8f96ed40, 0x0359ad0275a8b6f5, 0x41a94ce9dc428066,
0xcf8b0890283e370c, 0x8d7be97b81d4019f, 0x4a6acb477bea5a2a, 0x089a2aacd2006cb9,
0x14dea25f3af9026d, 0x562e43b4931334fe, 0x913f6188692d6f4b, 0xd3cf8063c0c759d8,
0x5dedc41a34bbeeb2, 0x1f1d25f19d51d821, 0xd80c07cd676f8394, 0x9afce626ce85b507,
};

View File

@@ -7,7 +7,7 @@ import stdlib;
const uint MaxText = 1024;
enum TokenKind : byte (string name)
enum TokenKind : char (string name)
{
WORD("word"),
TEXT("text"),
@@ -345,7 +345,7 @@ func bool isKeyChar(u8 c)
func void Tokenizer.parseKey(Tokenizer* t, Token* result)
{
char* start = t.current;
while (t.current[0] && isKeyChar(cast(t.current[0] as byte))) t.current++;
while (t.current[0] && isKeyChar(cast(t.current[0] as char))) t.current++;
uint len = cast(t.current - start as uint);
// assert(len < MaxText);

View File

@@ -56,7 +56,7 @@ static void usage(void)
OUTPUT(" clean Clean all build files.");
OUTPUT(" run [<target>] Run (and build if needed) the target in the current project.");
OUTPUT(" dist [<target>] Clean and build a target for distribution.");
OUTPUT(" docs [<target>] Generate documentation for the target.");
OUTPUT(" directives [<target>] Generate documentation for the target.");
OUTPUT(" bench [<target>] Benchmark a target.");
OUTPUT(" clean-run [<target>] Clean, then run the target.");
OUTPUT(" compile-run <file1> [<file2> ...] Compile files then immediately run the result.");
@@ -225,7 +225,7 @@ static void parse_command(void)
parse_optional_target();
return;
}
if (arg_match("docs"))
if (arg_match("directives"))
{
build_options.command = COMMAND_DOCS;
parse_optional_target();

View File

@@ -84,9 +84,9 @@ void create_project(void)
chdir("..");
if (mkdir("docs", 0755)) goto ERROR;
if (mkdir("directives", 0755)) goto ERROR;
chdir("docs");
chdir("directives");
file = fopen("about.md", "a");
if (!file) goto ERROR;

View File

@@ -1030,6 +1030,13 @@ static void fprint_ast_recursive(Context *context, FILE *file, Ast *ast, int ind
DUMP("(ct-compound\n");
fprint_asts_recursive(context, file, ast->ct_compound_stmt, indent + 1);
DUMPEND();
case AST_DOCS:
DUMP("(directives");
fprint_asts_recursive(context, file, ast->directives, indent + 1);
DUMPEND();
case AST_DOC_DIRECTIVE:
DUMP("(doc-directive");
DUMPEND();
case AST_DEFINE_STMT:
DUMP("(define");
DUMPDECL(ast->define_stmt);

View File

@@ -443,6 +443,7 @@ typedef struct _Decl
TokenId name_token;
SourceSpan span;
const char *external_name;
Ast *docs;
DeclKind decl_kind : 6;
Visibility visibility : 2;
ResolveStatus resolve_status : 2;
@@ -1053,6 +1054,34 @@ typedef struct
Expr *expr;
} AstAssertStmt;
/**
 * Payload of a single parsed doc comment directive (AST_DOC_DIRECTIVE).
 * Which union member is active depends on `kind`.
 */
typedef struct
{
DocDirectiveKind kind;
union
{
// Directive naming a parameter and keeping the free-text tail,
// e.g. "@param lexer The lexer used." — TODO confirm against parser.
struct
{
TokenId param;
TokenId rest_of_line;
} param;
// Contract-style directive carrying parsed declaration expressions
// plus an optional comment (presumably @require/@ensure — confirm).
struct
{
Expr *decl_exprs;
Expr *comment;
} contract;
// "@pure" directive: only the remainder of the line is stored.
struct
{
TokenId rest_of_line;
} pure;
// Any other/unknown directive: its name plus the rest of the line.
struct
{
const char *directive_name;
TokenId rest_of_line;
} generic;
};
} AstDocDirective;
typedef struct _Ast
{
SourceSpan span;
@@ -1088,6 +1117,8 @@ typedef struct _Ast
AstScopedStmt scoped_stmt; // 16
AstAssertStmt ct_assert_stmt;
AstAssertStmt assert_stmt;
Ast **directives;
AstDocDirective doc_directive;
};
} Ast;
@@ -1152,6 +1183,9 @@ typedef struct
const char *line_start;
File *current_file;
SourceLoc last_in_range;
TokenData *latest_token_data;
SourceLocation *latest_token_loc;
unsigned char *latest_token_type;
} Lexer;
@@ -1220,6 +1254,8 @@ typedef struct _Context
Token tok;
TokenId prev_tok;
Token next_tok;
TokenId docs_start;
TokenId docs_end;
} Context;
typedef struct
@@ -1329,8 +1365,8 @@ extern Diagnostics diagnostics;
extern Type *type_bool, *type_void, *type_string, *type_voidptr;
extern Type *type_half, *type_float, *type_double, *type_quad;
extern Type *type_char, *type_short, *type_int, *type_long, *type_isize;
extern Type *type_byte, *type_ushort, *type_uint, *type_ulong, *type_usize;
extern Type *type_ichar, *type_short, *type_int, *type_long, *type_isize;
extern Type *type_char, *type_ushort, *type_uint, *type_ulong, *type_usize;
extern Type *type_u128, *type_i128;
extern Type *type_compint, *type_compfloat;
extern Type *type_c_short, *type_c_int, *type_c_long, *type_c_longlong;
@@ -1342,6 +1378,7 @@ extern const char *attribute_list[NUMBER_OF_ATTRIBUTES];
extern const char *kw_align;
extern const char *kw_alignof;
extern const char *kw_distinct;
extern const char *kw_ensure;
extern const char *kw_inline;
extern const char *kw_kindof;
extern const char *kw_len;
@@ -1350,6 +1387,11 @@ extern const char *kw_nameof;
extern const char *kw_offsetof;
extern const char *kw_ordinal;
extern const char *kw_qnameof;
extern const char *kw_reqparse;
extern const char *kw_require;
extern const char *kw_pure;
extern const char *kw_param;
extern const char *kw_errors;
extern const char *kw_sizeof;
extern const char *kw___ceil;
extern const char *kw___round;

View File

@@ -69,6 +69,8 @@ typedef enum
AST_DEFAULT_STMT,
AST_DEFER_STMT,
AST_DO_STMT,
AST_DOC_DIRECTIVE,
AST_DOCS,
AST_EXPR_STMT,
AST_TRY_STMT,
AST_FOR_STMT,
@@ -158,6 +160,16 @@ typedef enum
case DECL_CT_SWITCH: case DECL_CT_CASE: case DECL_ATTRIBUTE: case DECL_LABEL: \
case DECL_DEFINE
// Directive kinds recognized inside doc comments ("/** ... */"),
// corresponding to the @-keywords declared in this commit
// (kw_pure, kw_require, kw_param, kw_errors, kw_ensure).
typedef enum
{
DOC_DIRECTIVE_UNKNOWN,
DOC_DIRECTIVE_PURE,
DOC_DIRECTIVE_REQUIRE,
DOC_DIRECTIVE_PARAM,
DOC_DIRECTIVE_ERRORS,
DOC_DIRECTIVE_ENSURE,
} DocDirectiveKind;
typedef enum
{
EXPR_POISONED,
@@ -296,6 +308,7 @@ typedef enum
// two character tokens.
TOKEN_AND, // &&
TOKEN_ARROW, // -> // Not used but reserved
TOKEN_BANGBANG, // !!
TOKEN_BIT_AND_ASSIGN, // &=
TOKEN_BIT_OR_ASSIGN, // |=
TOKEN_BIT_XOR_ASSIGN, // ^=
@@ -321,7 +334,6 @@ typedef enum
TOKEN_SCOPE, // ::
TOKEN_SHL, // <<
TOKEN_SHR, // >>
TOKEN_BANGBANG, // !!
// Three or more
TOKEN_ELLIPSIS, // ...
@@ -333,12 +345,12 @@ typedef enum
// Basic types names
TOKEN_VOID,
TOKEN_BYTE,
TOKEN_BOOL,
TOKEN_CHAR,
TOKEN_DOUBLE,
TOKEN_FLOAT,
TOKEN_HALF,
TOKEN_ICHAR,
TOKEN_INT,
TOKEN_ISIZE,
TOKEN_LONG,
@@ -448,7 +460,8 @@ typedef enum
TOKEN_DOCS_START, // /**
TOKEN_DOCS_END, // */ (may start with an arbitrary number of `*`)
TOKEN_DOCS_EOL, // "\n" only seen in docs.
TOKEN_DOCS_EOL, // "\n" only seen in directives.
TOKEN_DOCS_DIRECTIVE, // @ in the directive
TOKEN_DOCS_LINE, // Any line within /** **/
TOKEN_EOF, // \n - SHOULD ALWAYS BE THE LAST TOKEN.

View File

@@ -4,10 +4,23 @@
#include "compiler_internal.h"
typedef enum
{
LEX_NORMAL,
LEX_DOCS,
} LexMode;
typedef enum
{
DOC_END_EOF,
DOC_END_LAST,
DOC_END_EOL,
DOC_END_ERROR,
} DocEnd;
#pragma mark --- Lexing general methods.
static bool lexer_scan_token_inner(Lexer *lexer);
static bool lexer_scan_token_inner(Lexer *lexer, LexMode mode);
// Peek at the current character in the buffer.
static inline char peek(Lexer *lexer)
@@ -76,7 +89,7 @@ static inline bool match(Lexer *lexer, char expected)
* This call is doing the basic allocation, with other functions
* filling out additional information.
**/
static inline void add_generic_token(Lexer *lexer, TokenType type, SourceLocation **ret_loc, TokenData **ret_data)
static inline void add_generic_token(Lexer *lexer, TokenType type)
{
// Allocate source location, type, data for the token
// each of these use their own arena,
@@ -85,7 +98,7 @@ static inline void add_generic_token(Lexer *lexer, TokenType type, SourceLocatio
// Consequently these allocs are actually simultaneously
// allocating data and putting that data in an array.
SourceLocation *location = sourceloc_alloc();
char *token_type = toktype_alloc();
unsigned char *token_type = (unsigned char *)toktype_alloc();
TokenData *data = tokdata_alloc();
*token_type = type;
@@ -124,19 +137,18 @@ static inline void add_generic_token(Lexer *lexer, TokenType type, SourceLocatio
}
// Return pointers to the data and the location,
// these maybe be used to fill in data.
*ret_data = data;
*ret_loc = location;
lexer->latest_token_data = data;
lexer->latest_token_loc = location;
lexer->latest_token_type = token_type;
}
// Error? We simply generate an invalid token and print out the error.
static bool add_error_token(Lexer *lexer, const char *message, ...)
{
TokenData *data;
SourceLocation *loc;
add_generic_token(lexer, TOKEN_INVALID_TOKEN, &loc, &data);
add_generic_token(lexer, TOKEN_INVALID_TOKEN);
va_list list;
va_start(list, message);
sema_verror_range(loc, message, list);
sema_verror_range(lexer->latest_token_loc, message, list);
va_end(list);
return false;
}
@@ -144,10 +156,8 @@ static bool add_error_token(Lexer *lexer, const char *message, ...)
// Add a new regular token.
static bool add_token(Lexer *lexer, TokenType type, const char *string)
{
TokenData *data;
SourceLocation *loc;
add_generic_token(lexer, type, &loc, &data);
data->string = string;
add_generic_token(lexer, type);
lexer->latest_token_data->string = string;
return true;
}
@@ -251,16 +261,18 @@ static inline bool parse_multiline_comment(Lexer *lexer)
}
}
/**
* Skip regular whitespace.
*/
static void skip_whitespace(Lexer *lexer)
static void skip_whitespace(Lexer *lexer, LexMode lex_type)
{
while (1)
{
switch (peek(lexer))
{
case '\n':
if (lex_type != LEX_NORMAL) return;
lexer_store_line_end(lexer);
FALLTHROUGH;
case ' ':
@@ -276,6 +288,7 @@ static void skip_whitespace(Lexer *lexer)
}
#pragma mark --- Identifier scanning
static inline bool scan_prefixed_ident(Lexer *lexer, TokenType type, TokenType no_ident_type, bool ends_with_bang, const char *start)
@@ -489,10 +502,8 @@ static inline bool scan_dec(Lexer *lexer)
{
return add_error_token(lexer, "Invalid float value.");
}
SourceLocation *token;
TokenData *data;
add_generic_token(lexer, TOKEN_REAL, &token, &data);
data->value = fval;
add_generic_token(lexer, TOKEN_REAL);
lexer->latest_token_data->value = fval;
return true;
}
return add_token(lexer, TOKEN_INTEGER, lexer->lexing_start);
@@ -671,11 +682,9 @@ static inline bool scan_char(Lexer *lexer)
add_error_token(lexer, "Character literals may only be 1, 2 or 8 characters wide.");
}
TokenData *data;
SourceLocation *loc;
add_generic_token(lexer, TOKEN_CHAR_LITERAL, &loc, &data);
data->char_lit.u64 = bytes.u64;
data->width = (char)width;
add_generic_token(lexer, TOKEN_CHAR_LITERAL);
lexer->latest_token_data->char_lit.u64 = bytes.u64;
lexer->latest_token_data->width = (char)width;
return true;
}
@@ -698,6 +707,282 @@ static inline bool scan_string(Lexer *lexer)
return add_token(lexer, TOKEN_STRING, lexer->lexing_start);
}
#pragma mark --- Lexer doc lexing
/**
 * Consume a run of '*' characters, stopping early if the '*' begins the
 * comment terminator (a '*' immediately followed by '/').
 * @param lexer the lexer whose cursor is advanced past the stars
 */
static void skip_doc_stars(Lexer *lexer)
{
	for (;;)
	{
		if (peek(lexer) != '*') return;
		if (peek_next(lexer) == '/') return;
		next(lexer);
	}
}
/**
 * OPTIONALLY emit the docs-end token. Any number of '*' may precede the
 * closing '/'. If no terminator is found here, the lexer is left untouched.
 * @param lexer
 * @return true if the terminator was consumed and TOKEN_DOCS_END was added.
 */
static bool parse_add_end_of_docs_if_present(Lexer *lexer)
{
	// Measure the run of '*' without consuming anything yet.
	int star_run = 0;
	while (lexer->current[star_run] == '*') star_run++;
	// A terminator needs at least one star, with '/' directly after the run.
	if (star_run == 0 || lexer->current[star_run] != '/') return false;
	// Commit: consume the stars plus the slash, then record the token.
	skip(lexer, star_run + 1);
	add_token(lexer, TOKEN_DOCS_END, lexer->lexing_start);
	lexer->lexing_start = lexer->current;
	return true;
}
/**
 * Consume the newline that ends a doc line: emit TOKEN_DOCS_EOL, then position
 * the lexer at the first interesting character of the next line by skipping
 * whitespace and any leading '*' decoration.
 * @param lexer must be positioned exactly on a '\n' (asserted).
 */
static void parse_add_end_of_doc_line(Lexer *lexer)
{
	assert(peek(lexer) == '\n');
	// Register the line break for source locations, then add the EOL token.
	lexer_store_line_end(lexer);
	next(lexer);
	add_token(lexer, TOKEN_DOCS_EOL, lexer->lexing_start);
	lexer->lexing_start = lexer->current;
	// Skip whitespace (LEX_DOCS mode stops at '\n', see skip_whitespace).
	skip_whitespace(lexer, LEX_DOCS);
	// And any leading stars:
	skip_doc_stars(lexer);
}
/**
 * Parse the end of a directive or a simple line, e.g.
 * For "* @param lexer The lexer used." then the remainder is "The lexer used."
 * For "*** Hello world" the remainder is "Hello world"
 *
 * Emits a TOKEN_DOCS_LINE for any non-empty text read, then reports how the
 * line terminated (end of docs, end of line, or end of file).
 */
static DocEnd parse_doc_remainder(Lexer *lexer)
{
	// Skip all initial whitespace.
	skip_whitespace(lexer, LEX_DOCS);
	lexer->lexing_start = lexer->current;
	int characters_read = 0;
	while (1)
	{
		switch (peek(lexer))
		{
			case '*':
				// Did we find the end of the directives?
				// If so return control.
				// Flush any text read so far as a docs line first.
				// NOTE(review): characters_read is not reset here, so a later
				// '*' on the same line flushes again — confirm intended.
				if (characters_read > 0)
				{
					add_token(lexer, TOKEN_DOCS_LINE, 0);
					lexer->lexing_start = lexer->current;
				}
				if (parse_add_end_of_docs_if_present(lexer)) return DOC_END_LAST;
				// Otherwise use default parsing.
				break;
			case '\n':
				// End of line: flush pending text, leave the '\n' unconsumed.
				if (characters_read > 0)
				{
					add_token(lexer, TOKEN_DOCS_LINE, 0);
					lexer->lexing_start = lexer->current;
				}
				return DOC_END_EOL;
			case '\0':
				// End of file inside the docs: flush and report EOF.
				if (characters_read > 0)
				{
					add_token(lexer, TOKEN_DOCS_LINE, 0);
					lexer->lexing_start = lexer->current;
				}
				return DOC_END_EOF;
			default:
				break;
		}
		// Otherwise move forward
		characters_read++;
		next(lexer);
	}
}
/**
 * Lex an "@errors" directive body. Errors are written as a '|'-separated
 * list of type identifiers (ErrorA | ErrorB); lexing of the list stops at
 * the first token that breaks that pattern, after which the rest of the
 * line is treated as free text.
 */
static DocEnd parse_doc_error_directive(Lexer *lexer)
{
	while (1)
	{
		// Skip any whitespace.
		skip_whitespace(lexer, LEX_DOCS);
		// First scan the name
		if (!lexer_scan_token_inner(lexer, LEX_DOCS)) return DOC_END_ERROR;
		// Only type identifiers continue the error list.
		if (*lexer->latest_token_type != TOKEN_TYPE_IDENT) break;
		// Skip any whitespace.
		skip_whitespace(lexer, LEX_DOCS);
		// If we don't reach "|" we exit, since errors are composed using ErrorA | ErrorB
		if (peek(lexer) != '|') break;
		if (!lexer_scan_token_inner(lexer, LEX_DOCS)) return DOC_END_ERROR;
		// We might get "|=" or something, in that case exit.
		if (*lexer->latest_token_type != TOKEN_BIT_OR) break;
	}
	// Anything left on the line is plain description text.
	return parse_doc_remainder(lexer);
}
/**
 * Contract directives use the style: "@require a > 2, b && c == true : "Must work foo"
 *
 * Lexes real tokens (not free text) until the line, the docs, or the file
 * ends, so the parser can later analyse the contract expressions.
 *
 * @param lexer
 * @return how the directive line terminated.
 */
static DocEnd parse_doc_contract_directive(Lexer *lexer)
{
	while (1)
	{
		// Skip all initial whitespace.
		skip_whitespace(lexer, LEX_DOCS);
		switch (peek(lexer))
		{
			case '*':
				// Did we find the end of the directives?
				// If so return control.
				if (parse_add_end_of_docs_if_present(lexer)) return DOC_END_LAST;
				// Otherwise use default parsing.
				break;
			case '\n':
				return DOC_END_EOL;
			case '\0':
				return DOC_END_EOF;
			default:
				break;
		}
		// Otherwise move forward
		if (!lexer_scan_token_inner(lexer, LEX_DOCS)) return DOC_END_ERROR;
		// "return" is an identifier inside.
		// (Backpatch the keyword token so the parser sees a plain ident.)
		if (*lexer->latest_token_type == TOKEN_RETURN)
		{
			*lexer->latest_token_type = TOKEN_IDENT;
		}
	}
}
/**
 * Lex an "@param" directive: one real token for the parameter name,
 * followed by the free-text remainder of the line.
 */
static DocEnd parse_doc_param_directive(Lexer *lexer)
{
	// Skip any whitespace.
	skip_whitespace(lexer, LEX_DOCS);
	// First scan the name
	if (!lexer_scan_token_inner(lexer, LEX_DOCS)) return DOC_END_ERROR;
	// Then the remainder
	return parse_doc_remainder(lexer);
}
/**
 * Lex a "@directive" inside a doc comment: emit TOKEN_DOCS_DIRECTIVE for the
 * '@', lex the directive keyword, then dispatch on the interned keyword
 * string (kw_* pointers compare by identity) to the matching body parser.
 */
static DocEnd parse_doc_directive(Lexer *lexer)
{
	// We expect a directive here.
	if (!is_letter(peek_next(lexer)))
	{
		// NOTE(review): add_error_token returns bool but this function
		// returns DocEnd — confirm false maps onto DOC_END_ERROR.
		return add_error_token(lexer, "Expected doc directive here.");
	}
	lexer->lexing_start = lexer->current;
	// First parse the '@'
	skip(lexer, 1);
	add_token(lexer, TOKEN_DOCS_DIRECTIVE, "@");
	lexer->lexing_start = lexer->current;
	// Then our keyword
	if (!scan_ident(lexer, TOKEN_IDENT, TOKEN_CONST, TOKEN_TYPE_IDENT, 0)) return DOC_END_ERROR;
	assert(*lexer->latest_token_type == TOKEN_IDENT || *lexer->latest_token_type == TOKEN_RETURN);
	const char *last_token_string = lexer->latest_token_data->string;
	if (*lexer->latest_token_type == TOKEN_RETURN)
	{
		// Backpatch the type.
		*lexer->latest_token_type = TOKEN_IDENT;
		return parse_doc_remainder(lexer);
	}
	if (kw_errors == last_token_string)
	{
		return parse_doc_error_directive(lexer);
	}
	if (last_token_string == kw_require || last_token_string == kw_ensure || last_token_string == kw_reqparse)
	{
		return parse_doc_contract_directive(lexer);
	}
	if (last_token_string == kw_param)
	{
		// The variable
		return parse_doc_param_directive(lexer);
	}
	// Unknown directives still get their line lexed as free text.
	return parse_doc_remainder(lexer);
}
/**
 * Lex an entire doc comment (the "/" "**" ... "*" "/" form), emitting
 * TOKEN_DOCS_START first and a stream of directive/line/EOL tokens until
 * the terminator emits TOKEN_DOCS_END.
 * @return true on success, false on a lexing error or unterminated comment.
 **/
static bool parse_doc_comment(Lexer *lexer)
{
	// Add the doc start token.
	add_token(lexer, TOKEN_DOCS_START, lexer->lexing_start);
	// Skip any additional stars
	skip_doc_stars(lexer);
	// Main "doc parse" loop.
	while (1)
	{
		// 1. Skip any whitespace
		skip_whitespace(lexer, LEX_DOCS);
		// 2. Did we find the end?
		if (reached_end(lexer)) return add_error_token(lexer, "Missing '*/' to end the doc comment.");
		// 3. See if we reach the end of the docs.
		if (parse_add_end_of_docs_if_present(lexer)) return true;
		DocEnd end;
		// Parse a segment
		switch (peek(lexer))
		{
			case '@':
				end = parse_doc_directive(lexer);
				break;
			case '\n':
				end = DOC_END_EOL;
				break;
			default:
				end = parse_doc_remainder(lexer);
				break;
		}
		// We're done parsing a line:
		switch (end)
		{
			case DOC_END_ERROR:
				return false;
			case DOC_END_EOF:
				// Just continue, this will be picked up in the beginning of the loop.
				break;
			case DOC_END_LAST:
				// We're done, so return.
				return true;
			case DOC_END_EOL:
				// Walk past the end of line.
				parse_add_end_of_doc_line(lexer);
				break;
			default:
				UNREACHABLE
		}
	}
}
#pragma mark --- Lexer public functions
@@ -708,16 +993,18 @@ Token lexer_advance(Lexer *lexer)
return token;
}
static bool lexer_scan_token_inner(Lexer *lexer)
static bool lexer_scan_token_inner(Lexer *lexer, LexMode mode)
{
// Now skip the whitespace.
skip_whitespace(lexer);
skip_whitespace(lexer, mode);
// Point start to the first non-whitespace character.
lexer->lexing_start = lexer->current;
if (reached_end(lexer))
{
assert(mode == LEX_NORMAL);
return add_token(lexer, TOKEN_EOF, "\n") && false;
}
@@ -765,9 +1052,13 @@ static bool lexer_scan_token_inner(Lexer *lexer)
if (match(lexer, '!')) return add_token(lexer, TOKEN_BANGBANG, "!!");
return match(lexer, '=') ? add_token(lexer, TOKEN_NOT_EQUAL, "!=") : add_token(lexer, TOKEN_BANG, "!");
case '/':
if (match(lexer, '/')) return parse_line_comment(lexer);
if (match(lexer, '*')) return parse_multiline_comment(lexer);
if (match(lexer, '+')) return parse_nested_comment(lexer);
// We can't get any directives comments here.
if (mode != LEX_DOCS)
{
if (match(lexer, '/')) return parse_line_comment(lexer);
if (match(lexer, '*')) return match(lexer, '*') ? parse_doc_comment(lexer) : parse_multiline_comment(lexer);
if (match(lexer, '+')) return parse_nested_comment(lexer);
}
return match(lexer, '=') ? add_token(lexer, TOKEN_DIV_ASSIGN, "/=") : add_token(lexer, TOKEN_DIV, "/");
case '*':
if (match(lexer, '%'))
@@ -864,7 +1155,7 @@ void lexer_init_with_file(Lexer *lexer, File *file)
lexer->lexer_index = file->token_start_id;
while(1)
{
if (!lexer_scan_token_inner(lexer))
if (!lexer_scan_token_inner(lexer, LEX_NORMAL))
{
if (reached_end(lexer)) break;
while (!reached_end(lexer) && peek(lexer) != '\n') next(lexer);

View File

@@ -51,8 +51,8 @@ static void gencontext_destroy(GenContext *context)
LLVMValueRef llvm_emit_memclear_size_align(GenContext *c, LLVMValueRef ref, uint64_t size, unsigned int align, bool bitcast)
{
LLVMValueRef target = bitcast ? LLVMBuildBitCast(c->builder, ref, llvm_get_type(c, type_get_ptr(type_byte)), "") : ref;
return LLVMBuildMemSet(c->builder, target, LLVMConstInt(llvm_get_type(c, type_byte), 0, false),
LLVMValueRef target = bitcast ? LLVMBuildBitCast(c->builder, ref, llvm_get_type(c, type_get_ptr(type_char)), "") : ref;
return LLVMBuildMemSet(c->builder, target, LLVMConstInt(llvm_get_type(c, type_char), 0, false),
LLVMConstInt(llvm_get_type(c, type_ulong), size, false), align);
}
@@ -290,6 +290,38 @@ static void gencontext_emit_global_variable_definition(GenContext *c, Decl *decl
decl->backend_ref = LLVMAddGlobal(c->module, llvm_get_type(c, decl->type), "tempglobal");
}
/**
 * Rewrite an array-like BEValue in place so it can be indexed as an address:
 * pointers are loaded and re-tagged as addresses, fixed arrays are left as-is,
 * and subarrays are replaced by the pointer stored in their first struct field.
 * Vararrays and strings are not implemented yet (TODO).
 */
void llvm_emit_ptr_from_array(GenContext *c, BEValue *value)
{
	switch (value->type->type_kind)
	{
		case TYPE_POINTER:
			// Load the pointer value, then treat that value as the address.
			llvm_value_rvalue(c, value);
			value->kind = BE_ADDRESS;
			return;
		case TYPE_ARRAY:
			// Fixed arrays are already addressable as-is.
			return;
		case TYPE_SUBARRAY:
		{
			// TODO insert trap on overflow.
			// Read the data pointer out of field 0 of the subarray struct.
			LLVMTypeRef subarray_type = llvm_get_type(c, value->type);
			assert(value->kind == BE_ADDRESS);
			LLVMValueRef pointer_addr = LLVMBuildStructGEP2(c->builder, subarray_type, value->value, 0, "subarray_ptr");
			LLVMTypeRef pointer_type = llvm_get_type(c, type_get_ptr(value->type->array.base));
			AlignSize alignment = type_abi_alignment(type_voidptr);
			// We need to pick the worst alignment in case this is packed in an array.
			if (value->alignment < alignment) alignment = value->alignment;
			llvm_value_set_address_align(value,
			llvm_emit_load_aligned(c, pointer_type, pointer_addr, 0, "subarrptr"), value->type, alignment);
			return;
		}
		case TYPE_VARARRAY:
		case TYPE_STRING:
			TODO
		default:
			UNREACHABLE
	}
}
static void gencontext_emit_global_variable_init(GenContext *c, Decl *decl)
{
assert(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST);
@@ -640,9 +672,9 @@ void gencontext_emit_introspection_type(GenContext *context, Decl *decl)
}
}
}
LLVMValueRef global_name = LLVMAddGlobal(context->module, llvm_get_type(context, type_byte), decl->name ? decl->name : "anon");
LLVMValueRef global_name = LLVMAddGlobal(context->module, llvm_get_type(context, type_char), decl->name ? decl->name : "anon");
LLVMSetGlobalConstant(global_name, 1);
LLVMSetInitializer(global_name, LLVMConstInt(llvm_get_type(context, type_byte), 1, false));
LLVMSetInitializer(global_name, LLVMConstInt(llvm_get_type(context, type_char), 1, false));
decl->type->backend_typeid = LLVMConstPointerCast(global_name, llvm_get_type(context, type_typeid));
switch (decl->visibility)
@@ -995,8 +1027,8 @@ void llvm_store_bevalue_aligned(GenContext *c, LLVMValueRef destination, BEValue
// Here we do an optimized(?) memcopy.
ByteSize size = type_size(value->type);
LLVMValueRef copy_size = llvm_const_int(c, size <= UINT32_MAX ? type_uint : type_usize, size);
destination = LLVMBuildBitCast(c->builder, destination, llvm_get_ptr_type(c, type_byte), "");
LLVMValueRef source = LLVMBuildBitCast(c->builder, value->value, llvm_get_ptr_type(c, type_byte), "");
destination = LLVMBuildBitCast(c->builder, destination, llvm_get_ptr_type(c, type_char), "");
LLVMValueRef source = LLVMBuildBitCast(c->builder, value->value, llvm_get_ptr_type(c, type_char), "");
LLVMBuildMemCpy(c->builder, destination, alignment ?: type_abi_alignment(value->type),
source, value->alignment ?: type_abi_alignment(value->type), copy_size);
return;

View File

@@ -23,8 +23,8 @@ LLVMValueRef llvm_emit_is_no_error(GenContext *c, LLVMValueRef error)
LLVMTypeRef llvm_const_padding_type(GenContext *c, ByteSize size)
{
assert(size > 0);
if (size == 1) return llvm_get_type(c, type_byte);
return LLVMArrayType(llvm_get_type(c, type_byte), size);
if (size == 1) return llvm_get_type(c, type_char);
return LLVMArrayType(llvm_get_type(c, type_char), size);
}
LLVMValueRef llvm_emit_const_padding(GenContext *c, ByteSize size)
@@ -132,42 +132,12 @@ static inline LLVMValueRef gencontext_emit_sub_int(GenContext *context, Type *ty
: LLVMBuildNSWSub(context->builder, left, right, "sub");
}
static inline void gencontext_emit_subscript_addr_base(GenContext *context, BEValue *value, Expr *parent)
static inline void llvm_emit_subscript_addr_base(GenContext *context, BEValue *value, Expr *parent)
{
LLVMValueRef parent_value;
Type *type = type_flatten(parent->type);
switch (type->type_kind)
{
case TYPE_POINTER:
llvm_emit_expr(context, value, parent);
llvm_value_rvalue(context, value);
value->kind = BE_ADDRESS;
return;
case TYPE_ARRAY:
llvm_emit_expr(context, value, parent);
return;
case TYPE_SUBARRAY:
{
// TODO insert trap on overflow.
LLVMTypeRef subarray_type = llvm_get_type(context, type);
llvm_emit_expr(context, value, parent);
assert(value->kind == BE_ADDRESS);
LLVMValueRef pointer_addr = LLVMBuildStructGEP2(context->builder, subarray_type, value->value, 0, "subarray_ptr");
LLVMTypeRef pointer_type = llvm_get_type(context, type_get_ptr(type->array.base));
AlignSize alignment = type_abi_alignment(type_voidptr);
// We need to pick the worst alignment in case this is packed in an array.
if (value->alignment < alignment) alignment = value->alignment;
llvm_value_set_address_align(value,
llvm_emit_load_aligned(context, pointer_type, pointer_addr, 0, "subarrptr"), type, alignment);
return;
}
case TYPE_VARARRAY:
case TYPE_STRING:
TODO
default:
UNREACHABLE
}
llvm_emit_expr(context, value, parent);
llvm_emit_ptr_from_array(context, value);
}
static inline LLVMValueRef llvm_emit_subscript_addr_with_base(GenContext *c, Type *parent_type, LLVMValueRef parent_value, LLVMValueRef index_value)
@@ -274,7 +244,7 @@ static inline void gencontext_emit_subscript(GenContext *c, BEValue *value, Expr
{
BEValue ref;
// First, get thing being subscripted.
gencontext_emit_subscript_addr_base(c, &ref, expr->subscript_expr.expr);
llvm_emit_subscript_addr_base(c, &ref, expr->subscript_expr.expr);
// It needs to be an address.
llvm_value_addr(c, &ref);
@@ -967,7 +937,7 @@ static inline void llvm_emit_inc_dec_change(GenContext *c, bool use_mod, BEValue
case TYPE_POINTER:
{
// Use byte here, we don't need a big offset.
LLVMValueRef add = LLVMConstInt(diff < 0 ? llvm_get_type(c, type_char) : llvm_get_type(c, type_byte), diff, diff < 0);
LLVMValueRef add = LLVMConstInt(diff < 0 ? llvm_get_type(c, type_ichar) : llvm_get_type(c, type_char), diff, diff < 0);
after_value = LLVMBuildGEP2(c->builder, llvm_get_type(c, type->pointer), value.value, &add, 1, "ptrincdec");
break;
}
@@ -1325,7 +1295,6 @@ static void gencontext_emit_slice(GenContext *context, BEValue *be_value, Expr *
// Calculate the size
LLVMValueRef size = LLVMBuildSub(context->builder, LLVMBuildAdd(context->builder, end_index, llvm_const_int(context, start_type, 1), ""), start_index, "size");
LLVMValueRef start_pointer;
switch (parent_type->type_kind)
{
@@ -2187,7 +2156,7 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr)
return;
case TYPE_STRING:
{
LLVMValueRef global_name = LLVMAddGlobal(c->module, LLVMArrayType(llvm_get_type(c, type_char), expr->const_expr.string.len + 1), "");
LLVMValueRef global_name = LLVMAddGlobal(c->module, LLVMArrayType(llvm_get_type(c, type_ichar), expr->const_expr.string.len + 1), "");
LLVMSetLinkage(global_name, LLVMInternalLinkage);
LLVMSetGlobalConstant(global_name, 1);
LLVMSetInitializer(global_name, LLVMConstStringInContext(c->context,
@@ -2529,7 +2498,7 @@ void llvm_emit_call_expr(GenContext *context, BEValue *be_value, Expr *expr)
if (signature->failable && signature->ret_abi_info)
{
Type *actual_return_type = type_lowering(signature->rtype->type);
return_param = llvm_emit_alloca(context, llvm_get_type(context, actual_return_type), 0, "retparam");
return_param = llvm_emit_alloca_aligned(context, actual_return_type, "retparam");
llvm_value_set(be_value, return_param, type_get_ptr(actual_return_type));
llvm_emit_parameter(context, &values, signature->ret_abi_info, be_value, be_value->type);
}

View File

@@ -240,6 +240,7 @@ void llvm_emit_memcpy_to_decl(GenContext *c, Decl *decl, LLVMValueRef source, un
void llvm_emit_stmt(GenContext *c, Ast *ast);
static inline LLVMValueRef llvm_emit_store(GenContext *context, Decl *decl, LLVMValueRef value);
void llvm_emit_panic_on_true(GenContext *c, LLVMValueRef value, const char *panic_name);
void llvm_emit_ptr_from_array(GenContext *c, BEValue *value);
void llvm_emit_return_abi(GenContext *c, BEValue *return_value, BEValue *failable);
void llvm_emit_return_implicit(GenContext *c);
LLVMValueRef llvm_emit_struct_gep_raw(GenContext *context, LLVMValueRef ptr, LLVMTypeRef struct_type, unsigned index, unsigned struct_alignment, unsigned offset, unsigned *alignment);

View File

@@ -400,7 +400,7 @@ void gencontext_emit_for_stmt(GenContext *c, Ast *ast)
llvm_emit_block(c, exit_block);
}
void llvm_emit_foreach_stmt(GenContext *c, Ast *ast)
static void llvm_emit_foreach_stmt(GenContext *c, Ast *ast)
{
// First we generate an exit.
LLVMBasicBlockRef exit_block = llvm_basic_block_new(c, "foreach.exit");
@@ -421,6 +421,8 @@ void llvm_emit_foreach_stmt(GenContext *c, Ast *ast)
// We pop the error here.
POP_ERROR();
llvm_emit_ptr_from_array(c, &enum_value);
// Create the index and optionally the index var
LLVMTypeRef real_index_type = llvm_get_type(c, type_isize);
BEValue index_var = {};
@@ -1178,6 +1180,8 @@ void llvm_emit_stmt(GenContext *c, Ast *ast)
}
switch (ast->ast_kind)
{
case AST_DOCS:
case AST_DOC_DIRECTIVE:
case AST_POISONED:
case AST_DEFINE_STMT:
UNREACHABLE

View File

@@ -120,7 +120,7 @@ static inline LLVMTypeRef llvm_type_from_ptr(GenContext *context, Type *type)
}
if (type == type_voidptr)
{
return type->backend_type = llvm_get_ptr_type(context, type_byte);
return type->backend_type = llvm_get_ptr_type(context, type_char);
}
return type->backend_type = LLVMPointerType(llvm_get_type(context, type->pointer), /** TODO **/0);
}
@@ -401,12 +401,12 @@ LLVMTypeRef llvm_get_coerce_type(GenContext *c, ABIArgInfo *arg_info)
// Add padding if needed.
if (arg_info->coerce_expand.offset_lo)
{
elements[element_index++] = LLVMArrayType(llvm_get_type(c, type_byte), arg_info->coerce_expand.offset_lo);
elements[element_index++] = LLVMArrayType(llvm_get_type(c, type_char), arg_info->coerce_expand.offset_lo);
}
elements[element_index++] = llvm_abi_type(c, arg_info->coerce_expand.lo);
if (arg_info->coerce_expand.padding_hi)
{
elements[element_index++] = LLVMArrayType(llvm_get_type(c, type_byte), arg_info->coerce_expand.padding_hi);
elements[element_index++] = LLVMArrayType(llvm_get_type(c, type_char), arg_info->coerce_expand.padding_hi);
}
if (arg_info->coerce_expand.hi)
{

View File

@@ -63,8 +63,7 @@ Path *path_find_parent_path(Context *context, Path *path)
Path *parent_path = path_create_from_string(context, path->module, last_scope_chars - path->module - 1, INVALID_RANGE);
// Should never fail.
assert(parent_path);
assert(parent_path && "Didn't we pass in a TOKEN_IDENT? That's the only reason this could fail.");
return parent_path;
}

View File

@@ -946,8 +946,8 @@ static Expr* parse_expr_block(Context *context, Expr *left)
ParseRule rules[TOKEN_EOF + 1] = {
[TOKEN_BOOL] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_BYTE] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_CHAR] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_ICHAR] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_SHORT] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_USHORT] = { parse_type_identifier, NULL, PREC_NONE },
[TOKEN_INT] = { parse_type_identifier, NULL, PREC_NONE },

View File

@@ -491,9 +491,6 @@ static inline TypeInfo *parse_base_type(Context *context)
case TOKEN_BOOL:
type_found = type_bool;
break;
case TOKEN_BYTE:
type_found = type_byte;
break;
case TOKEN_CHAR:
type_found = type_char;
break;
@@ -503,6 +500,9 @@ static inline TypeInfo *parse_base_type(Context *context)
case TOKEN_FLOAT:
type_found = type_float;
break;
case TOKEN_ICHAR:
type_found = type_ichar;
break;
case TOKEN_INT:
type_found = type_int;
break;
@@ -868,9 +868,9 @@ bool parse_next_is_decl(Context *context)
switch (context->tok.type)
{
case TOKEN_VOID:
case TOKEN_BYTE:
case TOKEN_BOOL:
case TOKEN_CHAR:
case TOKEN_BOOL:
case TOKEN_ICHAR:
case TOKEN_DOUBLE:
case TOKEN_FLOAT:
case TOKEN_INT:
@@ -912,11 +912,11 @@ bool parse_next_is_case_type(Context *context)
switch (context->tok.type)
{
case TOKEN_VOID:
case TOKEN_BYTE:
case TOKEN_BOOL:
case TOKEN_CHAR:
case TOKEN_DOUBLE:
case TOKEN_FLOAT:
case TOKEN_ICHAR:
case TOKEN_INT:
case TOKEN_ISIZE:
case TOKEN_LONG:
@@ -1849,6 +1849,125 @@ void parse_imports(Context *context)
}
}
/**
 * Consume an optional trailing TOKEN_DOCS_LINE.
 * @return the consumed line's token id, or INVALID_TOKEN_ID when absent.
 */
static inline TokenId parse_doc_opt_rest_of_line(Context *context)
{
	if (try_consume(context, TOKEN_DOCS_LINE)) return context->prev_tok;
	return INVALID_TOKEN_ID;
}
/**
 * Parse an "@param" directive into `docs`: the next token must be some kind
 * of identifier (the parameter name); the rest of the line is stored as the
 * optional description.
 * @return false (with an error reported) when no parameter name follows.
 */
static inline bool parse_doc_param(Context *context, Ast *docs)
{
	// Accept any identifier flavour as a parameter name.
	switch (context->tok.type)
	{
		case TOKEN_IDENT:
		case TOKEN_CT_IDENT:
		case TOKEN_TYPE_IDENT:
		case TOKEN_CT_CONST_IDENT:
		case TOKEN_HASH_CONST_IDENT:
		case TOKEN_HASH_TYPE_IDENT:
		case TOKEN_CT_TYPE_IDENT:
		case TOKEN_CONST_IDENT:
		case TOKEN_HASH_IDENT:
			break;
		default:
			SEMA_TOKEN_ERROR(context->tok, "Expected a parameter name here.");
			return false;
	}
	docs->doc_directive.kind = DOC_DIRECTIVE_PARAM;
	docs->doc_directive.param.param = context->tok.id;
	advance(context);
	docs->doc_directive.param.rest_of_line = parse_doc_opt_rest_of_line(context);
	return true;
}
/**
 * Parse an "@errors" directive. Unfinished: the leading TODO aborts before
 * any of the code below runs.
 * NOTE(review): the while-loop below has no advance/exit on the error path
 * and would spin forever if reached; also the kind is set to
 * DOC_DIRECTIVE_PARAM, which looks like a copy-paste from parse_doc_param —
 * revisit both when implementing this.
 */
static inline bool parse_doc_errors(Context *context, Ast *docs)
{
	TODO
	while (1)
	{
		if (context->tok.type != TOKEN_TYPE_IDENT)
		{
			SEMA_TOKEN_ERROR(context->tok, "Expected an error type here.");
		}
	}
	switch (context->tok.type)
	{
		case TOKEN_TYPE_IDENT:
			break;
		default:
			return false;
	}
	docs->doc_directive.kind = DOC_DIRECTIVE_PARAM;
	docs->doc_directive.param.param = context->tok.id;
	advance(context);
	docs->doc_directive.param.rest_of_line = parse_doc_opt_rest_of_line(context);
	return true;
}
/**
 * Parse the body of a contract directive (@require / @ensure): a list of
 * declaration-expressions, optionally followed by ": <comment expr>".
 * The caller is responsible for setting doc_directive.kind.
 * @return false if either expression list or comment fails to parse.
 */
static inline bool parse_doc_contract(Context *context, Ast *docs)
{
	docs->doc_directive.contract.decl_exprs = TRY_EXPR_OR(parse_decl_expr_list(context), false);
	// An optional ':' introduces the human-readable contract message.
	if (try_consume(context, TOKEN_COLON))
	{
		docs->doc_directive.contract.comment = TRY_EXPR_OR(parse_expr(context), false);
	}
	return true;
}
/**
 * Parse an optional doc comment block into an AST_DOCS node.
 *
 * On success *docs is NULL when no doc comment was present, otherwise the
 * AST_DOCS node whose ->directives holds one AST_DOC_DIRECTIVE per parsed
 * directive.
 *
 * Fixes relative to the initial checkin:
 * - *docs was never assigned the parsed node, so callers always saw NULL.
 * - only the @pure branch added doc_ast to ast->directives; now every
 *   directive is added.
 * - the contract/errors parsers were handed `ast` (the container) instead
 *   of `doc_ast` (the directive being filled in).
 *
 * @return false (after reporting an error) on malformed doc contents.
 */
static bool parse_docs(Context *context, Ast **docs)
{
	*docs = NULL;
	if (!try_consume(context, TOKEN_DOCS_START)) return true;
	Ast *ast = new_ast(AST_DOCS, (SourceSpan) { .loc = context->prev_tok, .end_loc = context->prev_tok });
	while (!try_consume(context, TOKEN_DOCS_END))
	{
		// Spin past the lines and line ends
		if (try_consume(context, TOKEN_DOCS_EOL)) continue;
		if (try_consume(context, TOKEN_DOCS_LINE)) continue;
		CONSUME_OR(TOKEN_DOCS_DIRECTIVE, false);
		CONSUME_OR(TOKEN_IDENT, false);
		const char *directive = TOKSTR(context->prev_tok);
		SourceSpan span = { context->prev_tok, context->prev_tok };
		Ast *doc_ast = new_ast(AST_DOC_DIRECTIVE, span);
		// Every directive — known or unknown — belongs in the directive list.
		vec_add(ast->directives, doc_ast);
		if (directive == kw_param)
		{
			if (!parse_doc_param(context, doc_ast)) return false;
			goto LINE_END;
		}
		if (directive == kw_pure)
		{
			doc_ast->doc_directive.kind = DOC_DIRECTIVE_PURE;
			doc_ast->doc_directive.pure.rest_of_line = parse_doc_opt_rest_of_line(context);
			goto LINE_END;
		}
		if (directive == kw_ensure)
		{
			doc_ast->doc_directive.kind = DOC_DIRECTIVE_ENSURE;
			if (!parse_doc_contract(context, doc_ast)) return false;
			goto LINE_END;
		}
		if (directive == kw_require)
		{
			doc_ast->doc_directive.kind = DOC_DIRECTIVE_REQUIRE;
			if (!parse_doc_contract(context, doc_ast)) return false;
			goto LINE_END;
		}
		if (directive == kw_errors)
		{
			if (!parse_doc_errors(context, doc_ast)) return false;
			goto LINE_END;
		}
		// Unknown directive: keep its name and free-text remainder.
		doc_ast->doc_directive.kind = DOC_DIRECTIVE_UNKNOWN;
		doc_ast->doc_directive.generic.directive_name = directive;
		doc_ast->doc_directive.generic.rest_of_line = parse_doc_opt_rest_of_line(context);
	LINE_END:
		if (try_consume(context, TOKEN_DOCS_EOL)) continue;
		EXPECT_OR(TOKEN_DOCS_END, false);
	}
	// Hand the populated docs node back to the caller.
	*docs = ast;
	return true;
}
/**
* top_level_statement ::= visibility? top_level
@@ -1871,6 +1990,8 @@ void parse_imports(Context *context)
*/
Decl *parse_top_level_statement(Context *context)
{
Ast *docs = NULL;
if (!parse_docs(context, &docs)) return poisoned_decl;
Visibility visibility = VISIBLE_MODULE;
switch (context->tok.type)
{
@@ -1889,53 +2010,95 @@ Decl *parse_top_level_statement(Context *context)
break;
}
Decl *decl;
switch (context->tok.type)
{
case TOKEN_DOCS_START:
if (context->docs_start.index == INVALID_TOKEN_ID.index)
{
SEMA_TOKEN_ERROR(context->tok, "Did not expect doc comments after visibility.");
return poisoned_decl;
}
SEMA_TOKEN_ERROR(context->tok, "There are more than one doc comment in a row, that is not allowed.");
return poisoned_decl;
case TOKEN_DEFINE:
return parse_define(context, visibility);
decl = TRY_DECL_OR(parse_define(context, visibility), poisoned_decl);
break;
case TOKEN_ATTRIBUTE:
return parse_attribute_declaration(context, visibility);
decl = TRY_DECL_OR(parse_attribute_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_FUNC:
return parse_func_definition(context, visibility, false);
decl = TRY_DECL_OR(parse_func_definition(context, visibility, false), poisoned_decl);
break;
case TOKEN_CT_ASSERT:
if (!check_no_visibility_before(context, visibility)) return poisoned_decl;
{
Ast *ast = TRY_AST_OR(parse_ct_assert_stmt(context), false);
vec_add(context->ct_asserts, ast);
if (docs)
{
SEMA_ERROR(docs, "Unexpected doc comment before $assert, did you mean to use a regular comment?");
return poisoned_decl;
}
return NULL;
}
case TOKEN_CT_IF:
if (!check_no_visibility_before(context, visibility)) return poisoned_decl;
return parse_ct_if_top_level(context);
decl = TRY_DECL_OR(parse_ct_if_top_level(context), poisoned_decl);
if (docs)
{
SEMA_ERROR(docs, "Unexpected doc comment before $if, did you mean to use a regular comment?");
return poisoned_decl;
}
break;
case TOKEN_CT_SWITCH:
if (!check_no_visibility_before(context, visibility)) return poisoned_decl;
return parse_ct_switch_top_level(context);
decl = TRY_DECL_OR(parse_ct_switch_top_level(context), poisoned_decl);
if (docs)
{
SEMA_ERROR(docs, "Unexpected doc comment before $switch, did you mean to use a regular comment?");
return poisoned_decl;
}
break;
case TOKEN_CONST:
return parse_top_level_const_declaration(context, visibility);
decl = TRY_DECL_OR(parse_top_level_const_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_STRUCT:
case TOKEN_UNION:
return parse_struct_declaration(context, visibility);
decl = TRY_DECL_OR(parse_struct_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_GENERIC:
return parse_generics_declaration(context, visibility);
decl = TRY_DECL_OR(parse_generics_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_MACRO:
return parse_macro_declaration(context, visibility);
decl = TRY_DECL_OR(parse_macro_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_ENUM:
return parse_enum_declaration(context, visibility);
decl = TRY_DECL_OR(parse_enum_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_ERR:
return parse_error_declaration(context, visibility);
decl = TRY_DECL_OR(parse_error_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_TYPEDEF:
return parse_typedef_declaration(context, visibility);
decl = TRY_DECL_OR(parse_typedef_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_CT_TYPE_IDENT:
case TOKEN_TYPE_IDENT:
// All of these start type
return parse_global_declaration(context, visibility);
decl = TRY_DECL_OR(parse_global_declaration(context, visibility), poisoned_decl);
break;
case TOKEN_IDENT:
if (!check_no_visibility_before(context, visibility)) return poisoned_decl;
return parse_incremental_array(context);
decl = TRY_DECL_OR(parse_incremental_array(context), poisoned_decl);
if (docs)
{
SEMA_ERROR(docs,
"Unexpected doc comment before incremental array, did you mean to use a regular comment?");
return poisoned_decl;
}
break;
case TOKEN_EOF:
assert(visibility != VISIBLE_MODULE);
TODO
//sema_error_at(context->token->span.loc - 1, "Expected a top level declaration'.");
SEMA_TOKID_ERROR(context->prev_tok, "Expected a top level declaration");
return poisoned_decl;
case TOKEN_CT_CONST_IDENT:
if (context->next_tok.type == TOKEN_EQ)
@@ -1950,11 +2113,14 @@ Decl *parse_top_level_statement(Context *context)
return poisoned_decl;
default:
// We could have included all fundamental types above, but do it here instead.
if (token_is_type(context->tok.type))
if (!token_is_type(context->tok.type))
{
return parse_global_declaration(context, visibility);
SEMA_TOKEN_ERROR(context->tok, "Expected a top level declaration here.");
return poisoned_decl;
}
SEMA_TOKEN_ERROR(context->tok, "Expected a top level declaration here.");
return poisoned_decl;
decl = TRY_DECL_OR(parse_global_declaration(context, visibility), poisoned_decl);
break;
}
decl->docs = docs;
return decl;
}

View File

@@ -953,6 +953,7 @@ Ast *parse_stmt(Context *context)
{
case TOKEN_ASM_STRING:
case TOKEN_ASM_CONSTRAINT:
case TOKEN_DOCS_DIRECTIVE:
UNREACHABLE
case TOKEN_LBRACE:
return parse_compound_stmt(context);
@@ -962,11 +963,11 @@ Ast *parse_stmt(Context *context)
advance(context);
return poisoned_ast;
case TOKEN_VOID:
case TOKEN_BYTE:
case TOKEN_BOOL:
case TOKEN_CHAR:
case TOKEN_BOOL:
case TOKEN_DOUBLE:
case TOKEN_FLOAT:
case TOKEN_ICHAR:
case TOKEN_INT:
case TOKEN_ISIZE:
case TOKEN_LONG:

View File

@@ -6,8 +6,6 @@
#include "parser_internal.h"
#pragma mark --- Parser base methods
/**
@@ -22,7 +20,7 @@ inline void advance(Context *context)
context->next_lead_comment = NULL;
context->prev_tok = context->tok.id;
context->tok = context->next_tok;
while(1)
while (1)
{
if (context->tok.type == TOKEN_EOF)
{
@@ -54,7 +52,8 @@ inline void advance(Context *context)
{
if (context->trailing_comment)
{
SEMA_TOKEN_ERROR(context->next_tok, "You have multiple trailing doc-style comments, should the second one go on the next line?");
SEMA_TOKEN_ERROR(context->next_tok,
"You have multiple trailing doc-style comments, should the second one go on the next line?");
}
else
{
@@ -65,7 +64,8 @@ inline void advance(Context *context)
{
if (context->lead_comment)
{
SEMA_TOKEN_ERROR(context->next_tok, "You have multiple doc-style comments in a row, are all of them really meant to document the code that follows?");
SEMA_TOKEN_ERROR(context->next_tok,
"You have multiple doc-style comments in a row, are all of them really meant to document the code that follows?");
}
else
{
@@ -104,7 +104,6 @@ bool consume(Context *context, TokenType type, const char *message, ...)
}
#pragma mark --- Extern functions
/**
@@ -114,7 +113,8 @@ bool consume(Context *context, TokenType type, const char *message, ...)
static inline void parse_translation_unit(Context *context)
{
// Prime everything
advance(context); advance(context);
advance(context);
advance(context);
if (!parse_module(context)) return;
parse_imports(context);
while (!TOKEN_IS(TOKEN_EOF))

View File

@@ -62,6 +62,11 @@ bool consume(Context *context, TokenType type, const char *message, ...);
bool consume_const_name(Context *context, const char* type);
Expr *parse_precedence_with_left_side(Context *context, Expr *left_side, Precedence precedence);
// True when the current (not lookahead) token has the given type.
static inline bool tok_is(Context *context, TokenType type)
{
	return context->tok.type == type;
}
static inline bool expect(Context *context, TokenType token_type)
{
if (token_type == context->tok.type) return true;

View File

@@ -175,7 +175,7 @@ bool strpt(Context *context, Expr* left, Type *from_canonical, Type *canonical,
bool stpt(Context *context, Expr* left, Type *from, Type *canonical, Type *type, CastType cast_type)
{
if (canonical->pointer != type_char && canonical->pointer != type_byte)
if (canonical->pointer != type_ichar && canonical->pointer != type_char)
{
return sema_type_mismatch(context, left, type, cast_type);
}

View File

@@ -268,54 +268,19 @@ static bool expr_cast_to_index(Context *context, Expr *index)
return cast_implicit(context, index, type_isize);
case TYPE_I8:
case TYPE_I16:
return cast_implicit(context, index, type_int);
case TYPE_U8:
case TYPE_U16:
return cast_implicit(context, index, type_uint);
case TYPE_I32:
case TYPE_U32:
case TYPE_I64:
case TYPE_U64:
// This is fine.
return true;
case TYPE_U128:
SEMA_ERROR(index, "You need to explicitly cast this to a uint or ulong.");
return false;
case TYPE_I128:
SEMA_ERROR(index, "You need to explicitly cast this to a int or long.");
return false;
default:
SEMA_ERROR(index, "Cannot implicitly convert '%s' to an index.", type_to_error_string(index->type));
return false;
}
}
static bool expr_cast_to_index_size(Context *context, Expr *index)
{
switch (index->type->canonical->type_kind)
{
case TYPE_IXX:
if (!bigint_fits_in_bits(&index->const_expr.i, 64, true))
{
SEMA_ERROR(index, "The index is out of range, it must fit in a signed 64 bit integer.");
return false;
}
return cast_implicit(context, index, type_isize);
case TYPE_U8:
case TYPE_U16:
case TYPE_U32:
case TYPE_U64:
return cast_implicit(context, index, type_usize);
case TYPE_I8:
case TYPE_I16:
case TYPE_I32:
case TYPE_I64:
return cast_implicit(context, index, type_isize);
case TYPE_U128:
SEMA_ERROR(index, "You need to explicitly cast this to a usize.");
SEMA_ERROR(index, "You need to explicitly cast this to a uint or ulong.");
return false;
case TYPE_I128:
SEMA_ERROR(index, "You need to explicitly cast this to a size.");
SEMA_ERROR(index, "You need to explicitly cast this to a int or long.");
return false;
default:
SEMA_ERROR(index, "Cannot implicitly convert '%s' to an index.", type_to_error_string(index->type));
@@ -1425,7 +1390,7 @@ static inline bool sema_expr_analyse_subscript(Context *context, Expr *expr)
SEMA_ERROR(subscripted, "Cannot index '%s'.", type_to_error_string(type));
return false;
}
if (!sema_analyse_expr(context, type_int, index)) return false;
if (!sema_analyse_expr(context, NULL, index)) return false;
expr->constant = index->constant & subscripted->constant;
expr->pure = index->pure & subscripted->pure;
@@ -1472,8 +1437,8 @@ static inline bool sema_expr_analyse_slice(Context *context, Expr *expr)
expr->constant &= !end || end->constant;
// Fix index sizes
if (!expr_cast_to_index_size(context, start)) return false;
if (end && !expr_cast_to_index_size(context, end)) return false;
if (!expr_cast_to_index(context, start)) return false;
if (end && !expr_cast_to_index(context, end)) return false;
// Check range
if (type->type_kind == TYPE_POINTER)
@@ -4815,6 +4780,24 @@ static Ast *ast_copy_from_macro(Context *context, Ast *source)
Ast *ast = ast_shallow_copy(source);
switch (source->ast_kind)
{
case AST_DOCS:
ast->directives = ast_copy_list_from_macro(context, ast->directives);
return ast;
case AST_DOC_DIRECTIVE:
switch (ast->doc_directive.kind)
{
case DOC_DIRECTIVE_REQUIRE:
case DOC_DIRECTIVE_ENSURE:
MACRO_COPY_EXPR(ast->doc_directive.contract.decl_exprs);
MACRO_COPY_EXPR(ast->doc_directive.contract.comment);
break;
case DOC_DIRECTIVE_PARAM:
case DOC_DIRECTIVE_ERRORS:
case DOC_DIRECTIVE_PURE:
case DOC_DIRECTIVE_UNKNOWN:
break;
}
return ast;
case AST_POISONED:
return ast;
case AST_ASM_STMT:

View File

@@ -1688,6 +1688,8 @@ static inline bool sema_analyse_statement_inner(Context *context, Ast *statement
{
case AST_POISONED:
case AST_SCOPED_STMT:
case AST_DOCS:
case AST_DOC_DIRECTIVE:
UNREACHABLE
case AST_ASM_STMT:
return sema_analyse_asm_stmt(context, statement);

View File

@@ -39,6 +39,8 @@ const char *attribute_list[NUMBER_OF_ATTRIBUTES];
const char *kw_align;
const char *kw_alignof;
const char *kw_distinct;
const char *kw_ensure;
const char *kw_errors;
const char *kw_inline;
const char *kw_kindof;
const char *kw_len;
@@ -46,13 +48,18 @@ const char *kw_main;
const char *kw_nameof;
const char *kw_offsetof;
const char *kw_ordinal;
const char *kw_param;
const char *kw_pure;
const char *kw_qnameof;
const char *kw_reqparse;
const char *kw_require;
const char *kw_sizeof;
const char *kw___ceil;
const char *kw___round;
const char *kw___sqrt;
const char *kw___trunc;
void symtab_init(uint32_t capacity)
{
assert (is_power_of_two(capacity) && "Must be a power of two");
@@ -88,6 +95,8 @@ void symtab_init(uint32_t capacity)
kw_align = KW_DEF("align");
kw_alignof = KW_DEF("alignof");
kw_distinct = KW_DEF("distinct");
kw_ensure = KW_DEF("ensure");
kw_errors = KW_DEF("errors");
kw_inline = KW_DEF("inline");
kw_kindof = KW_DEF("kindof");
kw_len = KW_DEF("len");
@@ -95,7 +104,10 @@ void symtab_init(uint32_t capacity)
kw_nameof = KW_DEF("nameof");
kw_offsetof = KW_DEF("offsetof");
kw_ordinal = KW_DEF("ordinal");
kw_param = KW_DEF("param");
kw_pure = KW_DEF("pure");
kw_qnameof = KW_DEF("qnameof");
kw_require = KW_DEF("required");
kw_sizeof = KW_DEF("sizeof");
kw___ceil = KW_DEF("__ceil");
kw___round = KW_DEF("__round");

View File

@@ -294,8 +294,8 @@ const char *token_type_to_string(TokenType type)
return "short";
case TOKEN_USHORT:
return "ushort";
case TOKEN_BYTE:
return "byte";
case TOKEN_ICHAR:
return "ichar";
case TOKEN_CHAR:
return "char";
case TOKEN_ISIZE:
@@ -329,6 +329,8 @@ const char *token_type_to_string(TokenType type)
return "/**";
case TOKEN_DOCS_END:
return "*/";
case TOKEN_DOCS_DIRECTIVE:
return "directive";
case TOKEN_DOCS_LINE:
return "DOCS_LINE";

View File

@@ -22,13 +22,13 @@ Type *type_double = &t_f64;
Type *type_quad = &t_f128;
Type *type_typeid = &t_typeid;
Type *type_typeinfo = &t_typeinfo;
Type *type_char = &t_i8;
Type *type_ichar = &t_i8;
Type *type_short = &t_i16;
Type *type_int = &t_i32;
Type *type_long = &t_i64;
Type *type_i128 = &t_i128;
Type *type_isize = &t_isz;
Type *type_byte = &t_u8;
Type *type_char = &t_u8;
Type *type_ushort = &t_u16;
Type *type_uint = &t_u32;
Type *type_ulong = &t_u64;
@@ -60,7 +60,7 @@ Type *type_int_signed_by_bitsize(unsigned bytesize)
{
switch (bytesize)
{
case 8: return type_char;
case 8: return type_ichar;
case 16: return type_short;
case 32: return type_int;
case 64: return type_long;
@@ -72,7 +72,7 @@ Type *type_int_unsigned_by_bitsize(unsigned bytesize)
{
switch (bytesize)
{
case 8: return type_byte;
case 8: return type_char;
case 16: return type_ushort;
case 32: return type_uint;
case 64: return type_ulong;
@@ -535,7 +535,7 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements)
break;
case TYPE_BOOL:
// Lower bool to unsigned char
type = type_byte;
type = type_char;
break;
case ALL_SIGNED_INTS:
// Lower signed to unsigned
@@ -887,13 +887,13 @@ type_create(#_name, &_shortname, _type, _bits, target->align_ ## _align, target-
DEF_TYPE(float, t_f32, TYPE_F32, 32, float);
DEF_TYPE(double, t_f64, TYPE_F64, 64, double);
DEF_TYPE(char, t_i8, TYPE_I8, 8, byte);
DEF_TYPE(ichar, t_i8, TYPE_I8, 8, byte);
DEF_TYPE(short, t_i16, TYPE_I16, 16, short);
DEF_TYPE(int, t_i32, TYPE_I32, 32, int);
DEF_TYPE(long, t_i64, TYPE_I64, 64, long);
DEF_TYPE(i128, t_i128, TYPE_I128, 128, i128);
DEF_TYPE(byte, t_u8, TYPE_U8, 8, byte);
DEF_TYPE(char, t_u8, TYPE_U8, 8, byte);
DEF_TYPE(ushort, t_u16, TYPE_U16, 16, short);
DEF_TYPE(uint, t_u32, TYPE_U32, 32, int);
DEF_TYPE(ulong, t_u64, TYPE_U64, 64, long);

View File

@@ -43,7 +43,7 @@ TargetInfo target_info_new()
.float_128 = false,
.float_16 = false,
.align_pointer = 8,
.align_byte = 8,
.align_char = 8,
.align_c_int = 32,
.align_c_long = 32,
.align_c_long_long = 64,

View File

@@ -53,7 +53,7 @@ typedef struct
bool float_128;
bool float_16;
unsigned align_pointer;
unsigned align_byte;
unsigned align_char;
unsigned align_short;
unsigned align_int;
unsigned align_long;

View File

@@ -23,6 +23,7 @@ entry:
%1 = bitcast [30 x double]* %student_t to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %1, i8* align 16 bitcast ([30 x double]* @0 to i8*), i32 240, i1 false)
%2 = load i32, i32* %x, align 4
%arridx = getelementptr inbounds [30 x double], [30 x double]* %student_t, i32 0, i32 %2
%uiuiext = zext i32 %2 to i64
%arridx = getelementptr inbounds [30 x double], [30 x double]* %student_t, i64 0, i64 %uiuiext
%3 = load double, double* %arridx, align 8
ret double %3

View File

@@ -3,7 +3,7 @@ module test;
struct Connection
{
long to;
long to;
char* type;
long length;
}

View File

@@ -11,7 +11,7 @@ bitstruct BitField
}
bitstruct BitField2 : byte
bitstruct BitField2 : char
{
int a : 3;
int b : 3;
@@ -30,7 +30,7 @@ struct Packet
int packet_id;
}
bitstruct BitField3 : byte[3]
bitstruct BitField3 : char[3]
{
int a : 3;
int b : 6;
@@ -38,7 +38,7 @@ bitstruct BitField3 : byte[3]
int d : 5;
}
bitstruct BitField3 : byte[3] @aligned
bitstruct BitField3 : char[3] @aligned
{
int a : 3;
int b : 5;

View File

@@ -1,5 +1,5 @@
const byte AA = ~0;
const byte BB = 200 ;
const char AA = ~0;
const char BB = 200 ;
const uint CC = ~0;
const uint DD = FOO;
@@ -7,7 +7,7 @@ const FOO = ~0;
uint x = AA;
uint z = CC;
byte w = FOO;
char w = FOO;
ushort v = FOO;
uint z2 = DD;

View File

@@ -7,8 +7,8 @@ error TheError
error TheError2
{
byte a;
byte b;
char a;
char b;
}
error TheError3

View File

@@ -1,5 +1,5 @@
error TooBig // #error: Error type may not exceed pointer
{
usize a;
char b;
ichar b;
}

View File

@@ -39,11 +39,11 @@ func void testDiv(int a, int b)
func void testAssignment()
{
char x = -3 - 5;
char c = -128;
ichar x = -3 - 5;
ichar c = -128;
}
func byte test22()
func char test22()
{
return 100;
}

View File

@@ -88,36 +88,36 @@ func void test16()
func void test17()
{
byte a = 100 + 300; // #error: '300' does not fit in type 'byte'
char a = 100 + 300; // #error: '300' does not fit in type 'char'
}
func void test18()
{
byte b = 100 + 156; // #error: Cannot fit '256' into type 'byte'
char b = 100 + 156; // #error: Cannot fit '256' into type 'char'
}
func void test19()
{
char b = (-40) - 126; // #error: Cannot fit '-166' into type 'char'
ichar b = (-40) - 126; // #error: Cannot fit '-166' into type 'ichar'
}
func void test20()
{
char d = ((-128 - 10) + 10) - 2; // #error: Cannot fit '-130' into type 'char'
char c = 100 * 100; // #error: Cannot fit '10000' into type 'char'
char e = (-138 + 30);
char f = -138 + 30; // #error: '-138' does not fit in type 'char'
char g = -(128);
check(128); // #error: '128' does not fit in type 'char'
ichar d = ((-128 - 10) + 10) - 2; // #error: Cannot fit '-130' into type 'ichar'
ichar c = 100 * 100; // #error: Cannot fit '10000' into type 'ichar'
ichar e = (-138 + 30);
ichar f = -138 + 30; // #error: '-138' does not fit in type 'ichar'
ichar g = -(128);
check(128); // #error: '128' does not fit in type 'ichar'
}
func void check(char x) {}
func void check(ichar x) {}
func byte test22()
func char test22()
{
return 300; // #error: '300' does not fit in type 'byte'
return 300; // #error: '300' does not fit in type 'char'
}

View File

@@ -1,19 +1,19 @@
func void test2(char a)
func void test2(ichar a)
{}
func void test1()
{
test2(100);
char c = 1;
ichar c = 1;
test2(c);
int a = 1;
test2(a); // #error: Cannot implicitly cast 'int' to 'char'.
test2(100 + a); // #error: Cannot implicitly cast 'int' to 'char'.
test2(a); // #error: Cannot implicitly cast 'int' to 'ichar'.
test2(100 + a); // #error: Cannot implicitly cast 'int' to 'ichar'.
const int X = 120;
test2(X); // #error: Cannot implicitly cast 'int' to 'char'.
test2(X); // #error: Cannot implicitly cast 'int' to 'ichar'.
test2(100 + 100); // #error: Cannot fit '200' into type 'char'.
test2(100 + 100); // #error: Cannot fit '200' into type 'ichar'.
}

View File

@@ -1,6 +1,6 @@
func void test1()
{
char a = cast(256 + 1 as char);
ichar a = cast(256 + 1 as ichar);
ushort b = cast(65536+1 as ushort);
char c = cast(65536+400 as ushort); // #error: Cannot implicitly cast 'ushort' to 'char'
ichar c = cast(65536+400 as ushort); // #error: Cannot implicitly cast 'ushort' to 'ichar'
}

View File

@@ -11,7 +11,7 @@ enum Enum : uint
A, B
}
enum EnumB : byte
enum EnumB : char
{
C, D
}
@@ -21,7 +21,7 @@ typedef func void(Enum) as Func;
func void test1(Enum e)
{
bool a = cast(e as bool);
byte b = cast(e as byte);
char b = cast(e as char);
uint c = cast(e as uint);
float d = cast(e as float);
uint* f = cast(e as uint*);

View File

@@ -4,8 +4,8 @@ public func int main(int argc, char** argv)
{
int a = 10;
int b = cast(20 as char);
int c = cast(a as char);
int b = cast(20 as ichar);
int c = cast(a as ichar);
return 0;
}

View File

@@ -10,7 +10,7 @@ enum Enum : uint
typedef func void(int) as Func;
typedef func bool(byte*) as FuncOther;
typedef func bool(char*) as FuncOther;
typedef func void(int) as FuncSame;
@@ -22,7 +22,7 @@ func void test1(Func arg)
func void test2(Func arg)
{
char b = cast(arg as char);
ichar b = cast(arg as ichar);
}
func void test3(Func arg)

View File

@@ -3,10 +3,10 @@ typedef int as Number32;
func void test1()
{
int a = cast(10 as char);
int b = cast(200 as char);
int a = cast(10 as ichar);
int b = cast(200 as ichar);
int c = cast(200 as int);
char d = cast(200 as int); // #error: Cannot implicitly cast 'int' to 'char'.
ichar d = cast(200 as int); // #error: Cannot implicitly cast 'int' to 'ichar'.
}
func void test2()

View File

@@ -1,12 +1,12 @@
func void test1(char* cp)
func void test1(ichar* cp)
{
int a = 10;
char* cp2 = cp - 10;
char* cp3 = cp + 10;
ichar* cp2 = cp - 10;
ichar* cp3 = cp + 10;
cp2 -= 10;
cp2 -= a;
cp3 += 10;
cp3 += a;
char* cp4 = cp - a;
ichar* cp4 = cp - a;
}

View File

@@ -1,6 +1,6 @@
struct Struct
{
byte* ptr;
char* ptr;
}
func void test1(Struct* s)

View File

@@ -2,7 +2,7 @@ module test;
func int foo1()
{
byte *pp = void;
char *pp = void;
uint w_cnt = void;
w_cnt += *pp;
@@ -17,7 +17,7 @@ func void foo2(int x)
test2(x, x ? 1.0 : 12.5, 1.0);
}
func int trys(char* s, int x)
func int trys(ichar* s, int x)
{
int asa = void;
double val = void;
@@ -34,7 +34,7 @@ func int trys(char* s, int x)
struct InternalFPF
{
byte type;
char type;
}
local func void setInternalFPFZero(InternalFPF* dest) @noinline

View File

@@ -7,7 +7,7 @@ func void test()
printf("%d\n", true);
printf("%d\n", 123);
printf("%f\n", 12.3);
char x1 = -123;
ichar x1 = -123;
bool b = false;
float z1 = 12.3;
printf("%d\n", b);

View File

@@ -8,7 +8,7 @@ struct MinInfo
struct UzGlobs
{
char answerbuf;
ichar answerbuf;
MinInfo[1] info;
MinInfo* pInfo;
}

View File

@@ -11,14 +11,14 @@ int[2] a2 = 30; // #error: Cannot implicitly cast 'compint' to 'int[2]'
// i8[] a; // @error{definition of variable with array type needs an explicit size or an initializer}
char ca = 0;
char cb = 1;
ichar ca = 0;
ichar cb = 1;
char cc = 127;
char cd = -128;
ichar cc = 127;
ichar cd = -128;
char ce = 128; // #error: '128' does not fit
char cf = -129; // #error: '-129' does not fit
ichar ce = 128; // #error: '128' does not fit
ichar cf = -129; // #error: '-129' does not fit
char cg = 70000; // #error: '70000' does not fit
char ch = -70000; // #error: '-70000' does not fit
ichar cg = 70000; // #error: '70000' does not fit
ichar ch = -70000; // #error: '-70000' does not fit

View File

@@ -8,11 +8,11 @@ public func void test1(int* x)
int d = x[-1];
}
public func void test2(byte* x)
public func void test2(char* x)
{
byte a = x[0];
byte b = *x;
byte c = x[1];
char a = x[0];
char b = *x;
char c = x[1];
}
public func void test3(long* x)
@@ -28,15 +28,15 @@ public func void test3(long* x)
%a = alloca i64, align 8
%b = alloca i64, align 8
%c = alloca i64, align 8
store i64* %0, i64** %x
store i64* %0, i64** %x, align 8
%1 = load i64*, i64** %x, align 8
%ptridx = getelementptr inbounds i64, i64* %1, i32 0
%ptridx = getelementptr inbounds i64, i64* %1, i64 0
%2 = load i64, i64* %ptridx, align 8
store i64 %2, i64* %a, align 8
%3 = load i64*, i64** %x, align 8
%4 = load i64, i64* %3, align 8
store i64 %4, i64* %b, align 8
%5 = load i64*, i64** %x, align 8
%ptridx1 = getelementptr inbounds i64, i64* %5, i32 1
%ptridx1 = getelementptr inbounds i64, i64* %5, i64 1
%6 = load i64, i64* %ptridx1, align 8
store i64 %6, i64* %c, align 8

View File

@@ -22,7 +22,7 @@ func void main()
{
printf("Value[%d]: %f\n", i, a);
}
foreach (byte i, double a : foo)
foreach (char i, double a : foo)
{
printf("Value2[%d]: %f\n", i, a);
}

View File

@@ -51,7 +51,7 @@ foreach.body:
br i1 %noerr2, label %after_check3, label %voiderr
after_check3:
%arridx = getelementptr inbounds [3 x i32], [3 x i32]* %x, i32 0, i32 0
%arridx = getelementptr inbounds [3 x i32], [3 x i32]* %x, i64 0, i64 0
store i32 1, i32* %arridx, align 4
br label %voiderr

View File

@@ -1,3 +1,3 @@
byte bar1 = '\xaf';
char bar2 = '\x0F';
char bar1 = '\xaf';
ichar bar2 = '\x0F';
ushort bar4 = '\u0FaF';

View File

@@ -44,13 +44,13 @@ public Foo4 foo4 = { 1, 2 };
struct Foo5
{
int bar @align(16);
char foo @align(16);
ichar foo @align(16);
}
$assert(Foo5.sizeof == 32);
public Foo5 foo5 = { 1, 2 };
func int test5(char x)
func int test5(ichar x)
{
Foo5 y = { .foo = x };
return y.foo + y.bar;

View File

@@ -16,9 +16,9 @@ func void test2()
func void test3()
{
char a = 1;
ichar a = 1;
int b = 2;
char c = a + b; // #error: Cannot implicitly cast 'int' to 'char'
ichar c = a + b; // #error: Cannot implicitly cast 'int' to 'ichar'
}
func void test4()
@@ -54,7 +54,7 @@ typedef int as Number;
func void test8()
{
Number a = 10;
char c = a; // #error: implicitly cast 'Number' (int) to 'char'
ichar c = a; // #error: implicitly cast 'Number' (int) to 'ichar'
}
@@ -82,33 +82,33 @@ enum Enum : int
func void test11()
{
int a = Enum.A;
char b = Enum.B; // #error: Cannot implicitly convert 'Enum' with underlying type of 'int' to 'char'
ichar b = Enum.B; // #error: Cannot implicitly convert 'Enum' with underlying type of 'int' to 'ichar'
}
func void test12()
{
float f = 3.14;
char a = f; // #error: cast 'float' to 'char'
ichar a = f; // #error: cast 'float' to 'ichar'
}
func void test13()
{
int a = 1;
char b = a; // #error: cast 'int' to 'char'
ichar b = a; // #error: cast 'int' to 'ichar'
}
func void test14()
{
byte a = 1;
char b = a; // #error: cast 'byte' to 'char'
char a = 1;
ichar b = a; // #error: cast 'char' to 'ichar'
}
func void test15()
{
float f = 3.14;
char c = 1;
char* a = &f; // #error: cast 'float*' to 'char*'
char* b = &c;
ichar c = 1;
ichar* a = &f; // #error: cast 'float*' to 'ichar*'
ichar* b = &c;
}
func void test16()
@@ -116,9 +116,9 @@ func void test16()
float f = 3.14;
int i = 1;
char c = 1 ? 'c' : 'd';
char d = 1 ? 'c' : i; // #error: cast 'int' to 'char'
char e = 1 ? i : 0; // #error: cast 'int' to 'char'
ichar c = 1 ? 'c' : 'd';
ichar d = 1 ? 'c' : i; // #error: cast 'int' to 'ichar'
ichar e = 1 ? i : 0; // #error: cast 'int' to 'ichar'
int g = 1 ? i : f; // #error: cast 'float' to 'int'
int a = f ? 1 : 0;
}
@@ -157,11 +157,11 @@ func void foo() {}
func void test22()
{
char a = foo(); // #error: cast 'void' to 'char'
ichar a = foo(); // #error: cast 'void' to 'ichar'
short b = foo(); // #error: cast 'void' to 'short'
int c = foo(); // #error: cast 'void' to 'int'
long d = foo(); // #error: cast 'void' to 'long'
byte e = foo(); // #error: cast 'void' to 'byte'
char e = foo(); // #error: cast 'void' to 'char'
ushort f = foo(); // #error: cast 'void' to 'ushort'
uint g = foo(); // #error: cast 'void' to 'uint'
ulong h = foo(); // #error: cast 'void' to 'ulong'

View File

@@ -6,7 +6,7 @@ enum Inf
C = 10000
}
enum Inf2 : byte
enum Inf2 : char
{
A,
B,