Files
c3c/lib/std/io/stream/bytebuffer.c3
Christoffer Lerno fc849c1440 0.6.0: init_new/init_temp removed. LinkedList API rewritten. List "pop" and "remove" function now return Optionals. RingBuffer API rewritten. Allocator interface changed. Deprecated Allocator, DString and mem functions removed. "identity" functions are now constants for Matrix and Complex numbers. @default implementations for interfaces removed. any* => any, same for interfaces. Emit local/private globals as "private" in LLVM, following C "static". Updated enum syntax. Add support [rgba] properties in vectors. Improved checks of aliased "void". Subarray -> slice. Fix of llvm codegen enum check. Improved alignment handling. Add --output-dir #1155. Removed List/Object append. GenericList renamed AnyList. Remove unused "unwrap". Fixes to cond. Optimize output in dead branches. Better checking of operator methods. Disallow any from implementing dynamic methods. Check for operator mismatch. Remove unnecessary bitfield. Remove numbering in --list* commands Old style enum declaration for params/type, but now the type is optional. Add note on #1086. Allow making distinct types out of "void", "typeid", "anyfault" and faults. Remove system linker build options. "Try" expressions must be simple expressions. Add optimized build to Mac tests. Register int. assert(false) only allowed in unused branches or in tests. Compile time failed asserts is a compile time error. Remove current_block_is_target. Bug when assigning an optional from an optional. Remove unused emit_zstring. Simplify phi code. Remove unnecessary unreachable blocks and remove unnecessary current_block NULL assignments. Proper handling of '.' and Win32 '//server' paths. Unify expression and macro blocks in the middle end. Add "no discard" to expression blocks with a return value. Detect "unsigned >= 0" as errors. Fix issue with distinct void as a member #1147. Improve callstack debug information #1184. Fix issue with absolute output-dir paths. 
Lambdas were not type checked thoroughly #1185. Fix compilation warning #1187. Request jump table using @jump for switches. Path normalization - fix possible null terminator out of bounds. Improved error messages on inlined macros.
2024-05-22 18:22:04 +02:00

148 lines
3.6 KiB
C

module std::io;
import std::math;
// A growable in-memory read/write buffer implementing both stream interfaces.
struct ByteBuffer (InStream, OutStream)
{
	Allocator allocator;  // Null when wrapping a caller-owned buffer (see init_with_buffer); free() is then a no-op on bytes.
	usz max_read;         // Once read_idx reaches this threshold, shrink() compacts consumed bytes out of the storage.
	char[] bytes;         // Backing storage; [read_idx, write_idx) is the unread region.
	usz read_idx;         // Next position read()/read_byte() consumes from.
	usz write_idx;        // Next position write()/write_byte() appends at.
	bool has_last;        // True right after a successful read; gates a single pushback_byte().
}
/**
 * Initialize a heap-backed, streamable read/write buffer.
 * max_read sets how many consumed bytes may accumulate before the
 * internal storage is shrunk (compacted) by shrink().
 * @require self.bytes.len == 0 "Buffer already initialized."
 **/
fn ByteBuffer*! ByteBuffer.new_init(&self, usz max_read, usz initial_capacity = 16, Allocator allocator = allocator::heap())
{
	// Never allocate less than 16 bytes up front.
	usz capacity = max(initial_capacity, 16);
	*self = { .allocator = allocator, .max_read = max_read };
	self.grow(capacity)!;
	return self;
}
// Same as new_init, but the storage comes from the temp allocator.
fn ByteBuffer*! ByteBuffer.temp_init(&self, usz max_read, usz initial_capacity = 16)
{
	ByteBuffer*! buffer = self.new_init(max_read, initial_capacity, allocator::temp());
	return buffer;
}
/**
 * Wrap an existing caller-owned buffer. No allocator is stored, so free()
 * will not release `buf`; the caller keeps ownership.
 * NOTE(review): writing past buf.len triggers grow() with a null allocator —
 * confirm callers are expected to stay within buf.len.
 * @require buf.len > 0
 * @require self.bytes.len == 0 "Buffer already initialized."
 **/
fn ByteBuffer*! ByteBuffer.init_with_buffer(&self, char[] buf)
{
	// max_read = buf.len: shrink() only compacts once the whole buffer is consumed.
	*self = { .max_read = buf.len, .bytes = buf };
	return self;
}
// Release the backing storage (if owned) and reset the buffer to its zero state.
fn void ByteBuffer.free(&self)
{
	if (!self.allocator)
	{
		// Caller-owned storage (init_with_buffer): nothing to release.
		*self = {};
		return;
	}
	allocator::free(self.allocator, self.bytes);
	*self = {};
}
// Append `bytes` to the buffer, growing storage as needed.
// Returns the number of bytes written (always bytes.len on success).
fn usz! ByteBuffer.write(&self, char[] bytes) @dynamic
{
	usz len = bytes.len;
	// Make room when the free tail is too small.
	if (self.bytes.len - self.write_idx < len) self.grow(len)!;
	self.bytes[self.write_idx:len] = bytes[..];
	self.write_idx += len;
	return len;
}
// Append a single byte, growing storage when the buffer is full.
fn void! ByteBuffer.write_byte(&self, char c) @dynamic
{
	// write_idx never exceeds bytes.len, so equality means no free space.
	if (self.write_idx == self.bytes.len) self.grow(1)!;
	self.bytes[self.write_idx] = c;
	self.write_idx++;
}
// Copy up to bytes.len unread bytes into `bytes`.
// Returns the count copied, or IoError.EOF if nothing is unread.
fn usz! ByteBuffer.read(&self, char[] bytes) @dynamic
{
	usz pending = self.write_idx - self.read_idx;
	if (pending == 0)
	{
		self.has_last = false;
		return IoError.EOF?;
	}
	// Clamp the request to what is actually buffered.
	usz len = bytes.len;
	if (len > pending) len = pending;
	bytes[:len] = self.bytes[self.read_idx:len];
	self.read_idx += len;
	// Zero-length destination reads nothing, so pushback stays disallowed.
	self.has_last = len > 0;
	self.shrink();
	return len;
}
// Consume and return one byte, or IoError.EOF if nothing is unread.
fn char! ByteBuffer.read_byte(&self) @dynamic
{
	// read_idx == write_idx means the unread region is empty.
	if (self.read_idx == self.write_idx)
	{
		self.has_last = false;
		return IoError.EOF?;
	}
	char result = self.bytes[self.read_idx];
	self.read_idx++;
	self.has_last = true;
	self.shrink();
	return result;
}
/*
 * Undo the most recent successful read by one byte.
 * Only the last byte of a successful read can be pushed back;
 * a second consecutive pushback returns IoError.EOF.
 */
fn void! ByteBuffer.pushback_byte(&self) @dynamic
{
	if (!self.has_last) return IoError.EOF?;
	// A successful read always leaves read_idx > 0 (shrink keeps one read byte).
	assert(self.read_idx > 0);
	self.has_last = false;
	self.read_idx--;
}
/*
 * Reposition the read cursor within the written region [0, write_idx].
 * Returns the resulting read position, or IoError.INVALID_POSITION when the
 * target would fall outside the written data.
 */
fn usz! ByteBuffer.seek(&self, isz offset, Seek seek) @dynamic
{
	switch (seek)
	{
		case SET:
			// Absolute position from the start of the buffer.
			if (offset < 0 || offset > self.write_idx) return IoError.INVALID_POSITION?;
			self.read_idx = offset;
			return offset;
		case CURSOR:
			// Relative move; reject stepping before 0 or past write_idx.
			if ((offset < 0 && self.read_idx < -offset) ||
				(offset > 0 && self.read_idx + offset > self.write_idx)) return IoError.INVALID_POSITION?;
			self.read_idx += offset;
		case END:
			// NOTE(review): a positive offset here moves backwards from the end
			// (read_idx = write_idx - offset) — confirm this matches the Seek.END
			// convention used by the other stream implementations.
			if (offset < 0 || offset > self.write_idx) return IoError.INVALID_POSITION?;
			self.read_idx = self.write_idx - offset;
	}
	return self.read_idx;
}
// Number of buffered bytes not yet consumed by read().
fn usz! ByteBuffer.available(&self) @inline @dynamic
{
	usz pending = self.write_idx - self.read_idx;
	return pending;
}
/**
 * Ensure the buffer has room for at least `n` more bytes past write_idx.
 *
 * Bug fix: the previous version reallocated to next_power_of_2(n), using only
 * the *additional* byte count and ignoring the bytes already buffered. With
 * e.g. bytes.len = 32, write_idx = 30 and a 10-byte write, it reallocated to
 * next_power_of_2(10) = 16 — truncating buffered data and making the
 * subsequent slice write at write_idx out of bounds. All call sites in this
 * file (new_init, write, write_byte) pass the additional bytes needed, so
 * sizing from write_idx + n fixes every caller.
 **/
fn void! ByteBuffer.grow(&self, usz n)
{
	// Total capacity required: everything already written plus n more bytes,
	// rounded up to a power of two for amortized growth.
	usz new_capacity = math::next_power_of_2(self.write_idx + n);
	// Never shrink the storage (defensive; callers only invoke grow when short on space).
	if (new_capacity <= self.bytes.len) return;
	char* p = allocator::realloc_aligned(self.allocator, self.bytes, new_capacity, .alignment = char.alignof)!;
	self.bytes = p[:new_capacity];
}
/*
 * Compact the buffer once at least max_read bytes have been consumed:
 * move the unread tail — plus the single byte just before read_idx, kept so
 * pushback_byte() can still step back — to the front of the storage.
 * Both call sites (read, read_byte) invoke this only after incrementing
 * read_idx, so read_idx >= 1 here and read_idx - 1 cannot underflow.
 */
macro ByteBuffer.shrink(&self)
{
	if (self.read_idx >= self.max_read)
	{
		// Drop the read data besides the last byte (for pushback_byte).
		usz readable = self.write_idx - self.read_idx;
		// Overlapping copy is safe: destination (offset 0) starts below the source.
		self.bytes[:1 + readable] = self.bytes[self.read_idx - 1:1 + readable];
		self.write_idx = 1 + readable;
		self.read_idx = 1;
	}
}