Updated indentation to C3 standard.

This commit is contained in:
Christoffer Lerno
2023-07-26 14:01:24 +02:00
parent a376d8e2bf
commit 499c82b089
83 changed files with 2357 additions and 2356 deletions

View File

@@ -11,11 +11,11 @@ fn void! NativeMutex.init(&mtx, MutexType type)
Pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr)) return ThreadFault.INIT_FAILED?;
defer pthread_mutexattr_destroy(&attr);
if (type & thread::MUTEX_RECURSIVE)
{
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return ThreadFault.INIT_FAILED?;
}
if (pthread_mutex_init(mtx, &attr)) return ThreadFault.INIT_FAILED?;
if (type & thread::MUTEX_RECURSIVE)
{
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return ThreadFault.INIT_FAILED?;
}
if (pthread_mutex_init(mtx, &attr)) return ThreadFault.INIT_FAILED?;
}
fn void! NativeMutex.destroy(&mtx)
@@ -30,25 +30,25 @@ fn void! NativeMutex.lock(&mtx)
// Try to take the mutex, polling with pthread_mutex_trylock and sleeping
// up to 5ms between attempts until `ms` milliseconds have elapsed.
// Returns LOCK_TIMEOUT? if the mutex stayed busy, LOCK_FAILED? on any
// other trylock/nanosleep error.
// NOTE(review): diff extraction had duplicated the whole body (old+new
// copies of the hunk); one copy kept.
fn void! NativeMutex.lock_timeoutout(&mtx, ulong ms)
{
	/* Try to acquire the lock and, if we fail, sleep for 5ms. */
	Errno result;
	while ((result = pthread_mutex_trylock(mtx)) == errno::EBUSY)
	{
		if (!ms) break;
		// Sleep the shorter of 5ms and the remaining budget.
		ulong sleep = min(5, ms);
		if (!libc::nanosleep(&& TimeSpec { .s = 0, .ns = (CLong)sleep * 1000_000 }, null)) return ThreadFault.LOCK_FAILED?;
		ms -= sleep;
	}
	switch (result)
	{
		case errno::OK:
			return;
		case errno::EBUSY:
		case errno::ETIMEDOUT:
			return ThreadFault.LOCK_TIMEOUT?;
		default:
			return ThreadFault.LOCK_FAILED?;
	}
}
fn bool NativeMutex.try_lock(&mtx)
@@ -83,7 +83,7 @@ fn void! NativeConditionVariable.broadcast(&cond)
// Block on the condition variable, atomically releasing `mtx` while waiting.
// Returns WAIT_FAILED? if pthread_cond_wait reports an error.
// NOTE(review): the stripped diff duplicated the wait line (old+new hunk
// copies); waiting twice would be a bug, so a single call is kept.
fn void! NativeConditionVariable.wait(&cond, NativeMutex* mtx)
{
	if (pthread_cond_wait(cond, mtx)) return ThreadFault.WAIT_FAILED?;
}
fn void! NativeConditionVariable.wait_timeout(&cond, NativeMutex* mtx, ulong ms)
@@ -115,11 +115,11 @@ fn void! NativeThread.create(&thread, ThreadFn thread_fn, void* arg)
PosixThreadData *thread_data = malloc(PosixThreadData);
*thread_data = { .thread_fn = thread_fn, .arg = arg };
if (pthread_create(thread, null, &callback, thread_data) != 0)
{
*thread = null;
free(thread_data);
return ThreadFault.INIT_FAILED?;
}
{
*thread = null;
free(thread_data);
return ThreadFault.INIT_FAILED?;
}
}
fn void! NativeThread.detach(thread)

View File

@@ -4,14 +4,14 @@ import std::os::win32;
// Initialize a Win32-backed mutex. Non-timed mutexes use a critical
// section; timed mutexes need a kernel mutex handle so they can be waited
// on with a timeout. Returns INIT_FAILED? if createMutex fails.
// NOTE(review): diff extraction duplicated lines 2-9 of the body; one
// copy kept.
fn void! NativeMutex.init(&mtx, MutexType type)
{
	mtx.already_locked = false;
	mtx.recursive = (bool)(type & thread::MUTEX_RECURSIVE);
	mtx.timed = (bool)(type & thread::MUTEX_TIMED);
	if (!mtx.timed)
	{
		// Fast path: a critical section suffices when no timeout support is needed.
		win32::initializeCriticalSection(&(mtx.critical_section));
		return;
	}
	if (!(mtx.handle = win32::createMutex(null, false, null))) return ThreadFault.INIT_FAILED?;
}
fn void! NativeMutex.destroy(&mtx)
@@ -21,27 +21,27 @@ fn void! NativeMutex.destroy(&mtx)
win32::deleteCriticalSection(&mtx.critical_section);
return;
}
if (!win32::closeHandle(mtx.handle)) return ThreadFault.DESTROY_FAILED?;
if (!win32::closeHandle(mtx.handle)) return ThreadFault.DESTROY_FAILED?;
}
fn void! NativeMutex.lock(&mtx)
{
if (!mtx.timed)
{
win32::enterCriticalSection(&mtx.critical_section);
}
else
{
switch (win32::waitForSingleObject(mtx.handle, win32::INFINITE))
{
case win32::WAIT_OBJECT_0:
break;
{
win32::enterCriticalSection(&mtx.critical_section);
}
else
{
switch (win32::waitForSingleObject(mtx.handle, win32::INFINITE))
{
case win32::WAIT_OBJECT_0:
break;
case win32::WAIT_ABANDONED:
default:
return ThreadFault.LOCK_FAILED?;
}
}
}
}
if (!mtx.recursive)
{
while (mtx.already_locked) win32::sleep(1);
@@ -97,10 +97,10 @@ fn void! NativeMutex.unlock(&mtx)
mtx.already_locked = false;
if (!mtx.timed)
{
win32::leaveCriticalSection(&mtx.critical_section);
return;
win32::leaveCriticalSection(&mtx.critical_section);
return;
}
if (!win32::releaseMutex(mtx.handle)) return ThreadFault.UNLOCK_FAILED?;
if (!win32::releaseMutex(mtx.handle)) return ThreadFault.UNLOCK_FAILED?;
}
const int CONDITION_EVENT_ONE = 0;
@@ -151,23 +151,23 @@ fn void! NativeConditionVariable.broadcast(&cond)
fn void! timedwait(NativeConditionVariable* cond, NativeMutex* mtx, uint timeout) @private
{
win32::enterCriticalSection(&cond.waiters_count_lock);
cond.waiters_count++;
cond.waiters_count++;
win32::leaveCriticalSection(&cond.waiters_count_lock);
mtx.unlock()!;
uint result = win32::waitForMultipleObjects(2, &cond.events, false, timeout);
switch (result)
{
case win32::WAIT_TIMEOUT:
mtx.lock()!;
return ThreadFault.WAIT_TIMEOUT?;
case win32::WAIT_FAILED:
mtx.lock()!;
return ThreadFault.WAIT_FAILED?;
default:
break;
}
uint result = win32::waitForMultipleObjects(2, &cond.events, false, timeout);
switch (result)
{
case win32::WAIT_TIMEOUT:
mtx.lock()!;
return ThreadFault.WAIT_TIMEOUT?;
case win32::WAIT_FAILED:
mtx.lock()!;
return ThreadFault.WAIT_FAILED?;
default:
break;
}
win32::enterCriticalSection(&cond.waiters_count_lock);
cond.waiters_count--;
@@ -220,30 +220,30 @@ fn void native_thread_yield()
// Run `func` exactly once across threads, tracking progress in flag.status:
// 0 = untouched, 1 = winner claimed (lock not yet ready), 2 = running,
// 3 = done. The winner CASes 0->1, initializes the lock, runs func under
// it, then publishes 3. Losers spin on status 1 and block on the critical
// section while status is 2.
// NOTE(review): diff extraction duplicated the whole while-loop (old+new
// hunk copies); one copy kept.
fn void NativeOnceFlag.call_once(&flag, OnceFn func)
{
	while (@volatile_load(flag.status) < 3)
	{
		switch (@volatile_load(flag.status))
		{
			case 0:
				// Race to claim the flag; only the thread that swaps 0->1 runs func.
				if (mem::compare_exchange_volatile(&flag.status, 1, 0, AtomicOrdering.SEQ_CONSISTENT, AtomicOrdering.SEQ_CONSISTENT) == 0)
				{
					win32::initializeCriticalSection(&flag.lock);
					win32::enterCriticalSection(&flag.lock);
					@volatile_store(flag.status, 2);
					func();
					@volatile_store(flag.status, 3);
					win32::leaveCriticalSection(&flag.lock);
					return;
				}
				break;
			case 1:
				// Winner chosen but lock not initialized yet - keep spinning.
				break;
			case 2:
				// func is running; block until the winner releases the lock.
				win32::enterCriticalSection(&flag.lock);
				win32::leaveCriticalSection(&flag.lock);
				break;
		}
	}
}
fn void! NativeThread.join(thread, int *res)

View File

@@ -53,11 +53,11 @@ macro void! ConditionVariable.signal(&cond) => NativeConditionVariable.signal((N
macro void! ConditionVariable.broadcast(&cond) => NativeConditionVariable.broadcast((NativeConditionVariable*)cond);
// Forward to the platform condition-variable wait, casting the generic
// wrappers down to their native representations.
// NOTE(review): the stripped diff duplicated the return line; the second
// copy was unreachable and is removed.
macro void! ConditionVariable.wait(&cond, Mutex* mutex)
{
	return NativeConditionVariable.wait((NativeConditionVariable*)cond, (NativeMutex*)mutex);
}
// Forward to the platform timed wait; `timeout` is passed through
// unchanged (milliseconds, per the native implementation's signature).
// NOTE(review): the stripped diff duplicated the return line; the second
// copy was unreachable and is removed.
macro void! ConditionVariable.wait_timeout(&cond, Mutex* mutex, ulong timeout)
{
	return NativeConditionVariable.wait_timeout((NativeConditionVariable*)cond, (NativeMutex*)mutex, timeout);
}