Files
c3c/test/unit/stdlib/threads/mutex.c3
Sander van den Bosch 3f20e5af1d add join for ThreadPool without destroying the threads (#2579)
* add join for ThreadPool without destroying the threads
* Make the main Thread block waiting for the worker threads to finish instead of busy looping, and do proper initialization and freeing of all variables.
* Updated test to use `atomic_store` and take into account the maximum queue size of the threadpool.
* - Add `ThreadPool` join function to wait for all threads to finish in the pool without destroying the threads.
- Return of Thread/Mutex/CondVar `destroy()` is now "@maydiscard" and should be ignored. It will return void in 0.8.0.
- Return of Mutex `unlock()` and `lock()` is now "@maydiscard" and should be ignored. They will return void in 0.8.0.
- Return of ConditionVariable `signal()` `broadcast()` and `wait()` are now "@maydiscard". They will return void in 0.8.0.
- Return of Thread `detach()` is now "@maydiscard". It will return void in 0.8.0.
- Buffered/UnbufferedChannel, and both ThreadPools have `@maydiscard` on a set of functions. They will return void in 0.8.0.
- Pthread bindings correctly return Errno instead of CInt.
- Return of Thread `join()` is now "@maydiscard".

---------

Co-authored-by: Christoffer Lerno <christoffer@aegik.com>
2025-12-06 23:54:04 +01:00

116 lines
1.9 KiB
Plaintext

module thread_test;
import std::thread;
import std::os;
// Scale factor for thread counts and lock depths used throughout these tests.
const TEST_MAGNITUDE = 10;
// Take and release the given mutex once. Used both as a thread body and as a
// direct call to check the mutex is still usable from the main thread.
fn void own_mutex(Mutex* m)
{
    m.lock();
    defer m.unlock();
}
// Spawn many threads that each briefly lock/unlock one shared mutex, join
// them all, then verify the main thread can still take the mutex afterwards.
fn void ensure_owner_checks() @test
{
    Mutex m;
    m.init()!!;
    Thread[3 * TEST_MAGNITUDE] threads;
    for (usz i = 0; i < threads.len; i++)
    {
        (&threads[i]).create((ThreadFn)&own_mutex, &m)!!;
    }
    foreach (&worker : threads) worker.join();
    own_mutex(&m);
}
// Bundles the mutex and the counter it protects so both can be handed to a
// thread body through the single void* thread argument.
struct ArgsWrapper1
{
Mutex* m; // guards all access to *v
ulong* v; // shared counter mutated by the worker threads
}
// Thread body: add one to the shared counter while holding the mutex.
// FIX: the original `args.v++` incremented the pointer *field* (pointer
// arithmetic on the ulong*), not the counter it points to — the test only
// passed because the matching decrement threads undid the pointer change.
// Dereference so the counter itself is mutated.
fn void shared_mutex_increment(ArgsWrapper1* args)
{
    args.m.lock();
    *args.v += 1;
    args.m.unlock();
}

// Thread body: subtract one from the shared counter while holding the mutex.
// (Same pointer-vs-pointee fix as shared_mutex_increment.)
fn void shared_mutex_decrement(ArgsWrapper1* args)
{
    args.m.lock();
    *args.v -= 1;
    args.m.unlock();
}
// Mutual-exclusion test: an equal number of incrementing and decrementing
// threads hammer one counter; if the mutex serializes them correctly, the
// counter ends back at zero.
fn void shared_mutex() @test
{
Mutex m;
m.init()!!;
// Hold the lock while creating the workers so none of them can start
// mutating the counter before every thread exists.
m.lock();
ulong v;
ArgsWrapper1 args =
{
.m = &m,
.v = &v
};
// An even number of threads must be chosen
Thread[6 * TEST_MAGNITUDE] threads;
// First half increments the counter ...
for (int i = 0; i < threads.len / 2; i++)
{
(&threads[i]).create((ThreadFn)&shared_mutex_increment, &args)!!;
}
// ... second half decrements it by the same amount.
for (int i = (threads.len / 2); i < threads.len; i++)
{
(&threads[i]).create((ThreadFn)&shared_mutex_decrement, &args)!!;
}
// Release the gate: all workers may now race for the lock.
m.unlock();
foreach(&t : threads) t.join();
// Increments and decrements must cancel exactly.
assert(v == 0);
}
// Recursive mutex
// Thread body: take the same recursive mutex many times over, then release
// it exactly as many times, exercising re-entrant lock counting.
fn void acquire_recursively(RecursiveMutex* m)
{
    // TODO: The recursive mutex functions can not directly be called via pointer
    usz depth = 5 * TEST_MAGNITUDE;
    for (usz level = 0; level < depth; level++)
    {
        ((Mutex*)m).lock();
    }
    for (usz level = 0; level < depth; level++)
    {
        ((Mutex*)m).unlock();
    }
}
// Hammer one recursive mutex from many threads at once, then once more from
// the main thread after all workers have joined.
fn void test_recursive_mutex() @test
{
    RecursiveMutex m;
    m.init()!!;
    defer m.destroy();
    Thread[3 * TEST_MAGNITUDE] threads;
    for (usz i = 0; i < threads.len; i++)
    {
        (&threads[i]).create((ThreadFn)&acquire_recursively, &m)!!;
    }
    foreach (&worker : threads) worker.join();
    acquire_recursively(&m);
}