add join for ThreadPool without destroying the threads (#2579)

* add join for ThreadPool without destroying the threads
* Make the main thread block waiting for the worker threads to finish instead of busy-looping, and do proper initialization and freeing of all variables.
* Updated test to use `atomic_store` and take into account the maximum queue size of the threadpool.
* - Add `ThreadPool` join function to wait for all threads to finish in the pool without destroying the threads.
- Return of Thread/Mutex/CondVar `destroy()` is now "@maydiscard" and should be ignored. It will return void in 0.8.0.
- Return of Mutex `unlock()` and `lock()` is now "@maydiscard" and should be ignored. They will return void in 0.8.0.
- Return of ConditionVariable `signal()` `broadcast()` and `wait()` are now "@maydiscard". They will return void in 0.8.0.
- Return of Thread `detach()` is now "@maydiscard". It will return void in 0.8.0.
- Buffered/UnbufferedChannel, and both ThreadPools have `@maydiscard` on a set of functions. They will return void in 0.8.0.
- Pthread bindings correctly return Errno instead of CInt.
- Return of Thread `join()` is now "@maydiscard".

---------

Co-authored-by: Christoffer Lerno <christoffer@aegik.com>
This commit is contained in:
Sander van den Bosch
2025-12-06 23:54:04 +01:00
committed by GitHub
parent 18e2838772
commit 3f20e5af1d
17 changed files with 470 additions and 370 deletions

View File

@@ -11,7 +11,7 @@ fn void init_destroy_buffered() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
}
}
@@ -21,7 +21,7 @@ fn void init_destroy_unbuffered() @test
{
UnbufferedChannel{int} c;
c.init(mem)!!;
defer c.destroy()!!;
defer c.destroy();
}
}
@@ -29,7 +29,7 @@ fn void push_to_buffered_channel_no_lock() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
c.push(1)!!;
}
@@ -38,7 +38,7 @@ fn void push_pop_buffered_no_locks() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
c.push(123)!!;
int got = c.pop()!!;
@@ -49,10 +49,10 @@ fn void push_pop_unbuffered_with_locks() @test
{
UnbufferedChannel{int} c;
c.init(mem)!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
@@ -72,9 +72,9 @@ fn void sending_to_closed_unbuffered_chan_is_forbidden() @test
{
UnbufferedChannel{int} c;
c.init(mem, )!!;
defer c.destroy()!!;
defer c.destroy();
c.close()!!;
c.close();
if (catch err = c.push(123))
{
@@ -88,9 +88,9 @@ fn void sending_to_closed_buffered_chan_is_forbidden() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
c.close()!!;
c.close();
if (catch err = c.push(123))
{
@@ -104,9 +104,9 @@ fn void reading_from_empty_closed_unbuffered_chan_is_forbidden() @test
{
UnbufferedChannel{int} c;
c.init(mem, )!!;
defer c.destroy()!!;
defer c.destroy();
c.close()!!;
c.close();
if (catch err = c.pop())
{
@@ -120,9 +120,9 @@ fn void reading_from_empty_closed_buffered_chan_is_forbidden() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
c.close()!!;
c.close();
if (catch err = c.pop())
{
@@ -136,13 +136,13 @@ fn void reading_from_non_empty_closed_buffered_chan_is_ok() @test
{
BufferedChannel{int} c;
c.init(mem, 3)!!;
defer c.destroy()!!;
defer c.destroy();
c.push(1)!!;
c.push(2)!!;
c.push(3)!!;
c.close()!!;
c.close();
int got = c.pop()!!;
assert(got == 1);
@@ -165,15 +165,15 @@ fn void reading_from_empty_buffered_chan_aborted_by_close() @test
{
BufferedChannel{int} c;
c.init(mem, 3)!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
BufferedChannel{int} c = (BufferedChannel{int})arg;
c.close()!!;
c.close();
return 0;
}, (void*)c)!!;
@@ -191,15 +191,15 @@ fn void reading_from_unbuffered_chan_aborted_by_close() @test
{
UnbufferedChannel{int} c;
c.init(mem, )!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
UnbufferedChannel{int} c = (UnbufferedChannel{int})arg;
c.close()!!;
c.close();
return 0;
}, (void*)c)!!;
@@ -217,17 +217,17 @@ fn void sending_to_full_buffered_chan_aborted_by_close() @test
{
BufferedChannel{int} c;
c.init(mem, 1)!!;
defer c.destroy()!!;
defer c.destroy();
c.push(1)!!;
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
BufferedChannel{int} c = (BufferedChannel{int})arg;
c.close()!!;
c.close();
return 0;
}, (void*)c)!!;
@@ -245,15 +245,15 @@ fn void sending_to_unbuffered_chan_aborted_by_close() @test
{
UnbufferedChannel{int} c;
c.init(mem, )!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
UnbufferedChannel{int} c = (UnbufferedChannel{int})arg;
c.close()!!;
c.close();
return 0;
}, (void*)c)!!;
@@ -271,10 +271,10 @@ fn void multiple_actions_unbuffered() @test
{
UnbufferedChannel{int} c;
c.init(mem, )!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{
@@ -305,10 +305,10 @@ fn void multiple_actions_buffered() @test
{
BufferedChannel{int} c;
c.init(mem, 10)!!;
defer c.destroy()!!;
defer c.destroy();
Thread thread;
defer thread.join()!!;
defer thread.join();
thread.create(fn int(void* arg)
{

View File

@@ -5,35 +5,12 @@ import std::os;
const TEST_MAGNITUDE = 10;
fn void lock_control_test() @test
{
Mutex m;
m.init()!!;
m.lock()!!;
assert(@catch(m.lock()));
}
fn void unlock_control_test() @test
{
Mutex m;
m.init()!!;
assert(@catch(m.unlock()));
}
fn void lock_with_double_unlock_test() @test
fn void own_mutex(Mutex* m)
{
Mutex m;
m.init()!!;
m.lock()!!;
m.unlock()!!;
assert(@catch(m.unlock()));
}
fn void? own_mutex(Mutex* m)
{
m.lock()!;
m.unlock()!;
m.lock();
m.unlock();
}
fn void ensure_owner_checks() @test
@@ -48,12 +25,9 @@ fn void ensure_owner_checks() @test
t.create((ThreadFn)&own_mutex, &m)!!;
}
foreach(&t : threads)
{
t.join()!!;
}
foreach(&t : threads) t.join();
own_mutex(&m)!!;
own_mutex(&m);
}
struct ArgsWrapper1
@@ -64,23 +38,23 @@ struct ArgsWrapper1
fn void shared_mutex_increment(ArgsWrapper1* args)
{
args.m.lock()!!;
args.m.lock();
args.v++;
args.m.unlock()!!;
args.m.unlock();
}
fn void shared_mutex_decrement(ArgsWrapper1* args)
{
args.m.lock()!!;
args.m.lock();
args.v--;
args.m.unlock()!!;
args.m.unlock();
}
fn void shared_mutex() @test
{
Mutex m;
m.init()!!;
m.lock()!!;
m.lock();
ulong v;
@@ -101,11 +75,8 @@ fn void shared_mutex() @test
(&threads[i]).create((ThreadFn)&shared_mutex_decrement, &args)!!;
}
m.unlock()!!;
foreach(&t : threads)
{
t.join()!!;
}
m.unlock();
foreach(&t : threads) t.join();
assert(v == 0);
}
@@ -117,12 +88,12 @@ fn void acquire_recursively(RecursiveMutex* m)
for (usz i = 0; i < 5 * TEST_MAGNITUDE; i++)
{
((Mutex*)m).lock()!!;
((Mutex*)m).lock();
}
for (usz i = 0; i < 5 * TEST_MAGNITUDE; i++)
{
((Mutex*)m).unlock()!!;
((Mutex*)m).unlock();
}
}
@@ -130,7 +101,7 @@ fn void test_recursive_mutex() @test
{
RecursiveMutex m;
m.init()!!;
defer m.destroy()!!;
defer m.destroy();
Thread[3 * TEST_MAGNITUDE] threads;
foreach(&t : threads)
@@ -138,10 +109,7 @@ fn void test_recursive_mutex() @test
t.create((ThreadFn)&acquire_recursively, &m)!!;
}
foreach(&t : threads)
{
t.join()!!;
}
foreach(&t : threads) t.join();
return acquire_recursively(&m);
}

View File

@@ -9,7 +9,7 @@ fn void init_destroy() @test
{
Pool pool;
pool.init()!!;
pool.destroy()!!;
pool.destroy();
}
}
@@ -21,8 +21,8 @@ fn void push_destroy() @test
int y = 20;
Pool pool;
pool.init()!!;
defer pool.destroy()!!;
pool.push(&do_work, &y)!!;
defer pool.destroy();
pool.push(&do_work, &y);
thread::sleep(time::ms(50));
test::eq(@atomic_load(x), @atomic_load(y));
}
@@ -36,16 +36,38 @@ fn void push_stop() @test
int y = 20;
Pool pool;
pool.init()!!;
pool.push(&do_work, &y)!!;
pool.stop_and_destroy()!!;
pool.push(&do_work, &y);
pool.stop_and_destroy();
test::eq(@atomic_load(x), @atomic_load(y));
}
}
// Verifies Pool.join(): block until every queued task has finished,
// without destroying the pool's worker threads.
fn void join() @test
{
// Reset the shared accumulator before queuing any work.
@atomic_store(x, 0);
Pool pool;
pool.init()!!;
// The pool is still torn down at scope exit; join() alone must not destroy it.
defer pool.stop_and_destroy();
// Queue four tasks; each do_wait(i) sleeps proportionally to i and then adds i to x.
for (usz i = 0; i < 4; i++)
{
pool.push(&do_wait, (void*)i);
}
// Wait for all queued tasks to complete before checking the result.
pool.join();
// 0 + 1 + 2 + 3 == 6 holds only if join() really waited for every task.
test::eq(x, 6);
}
// Shared accumulator observed by the threadpool tests.
int x;
// Worker body: copies the int pointed to by `arg` into the shared global `x`,
// using atomic load/store so the main thread can read it race-free.
fn int do_work(void* arg)
{
@atomic_store(x, @atomic_load(*(int*)arg));
return 0;
}
// Worker body for the join() test: decodes the pointer-encoded integer
// argument, sleeps `value` * 50ms, then atomically adds `value` to the
// shared global `x`.
fn int do_wait(void* arg)
{
// `arg` carries a small integer smuggled through the void* parameter.
usz value = (iptr)arg;
for (usz i = 0; i < value; i++) thread::sleep(time::ms(50));
// Read-modify-write is done with explicit atomics; note this is a
// load-then-store pair, not a single atomic add.
@atomic_store(x, @atomic_load(x) + (int)value);
return 0;
}

View File

@@ -26,8 +26,8 @@ fn void testrun_mutex()
foreach (&t : ts)
{
t.create(fn int(void* arg) {
m_global.lock()!!;
defer m_global.unlock()!!;
m_global.lock();
defer m_global.unlock();
a += 10;
thread::sleep_ms(5);
a *= 10;
@@ -45,32 +45,47 @@ fn void testrun_mutex()
assert(t.join()!! == 0);
}
assert(a == ts.len);
m_global.destroy()!!;
m_global.destroy();
}
fn void testrun_mutex_try() @test
{
Mutex m;
m.init()!!;
m.lock()!!;
m.lock();
assert(m.try_lock() == false);
m.unlock()!!;
m.unlock();
assert(m.try_lock() == true);
m.unlock()!!;
m.unlock();
}
int val_mutex = 0;
fn void testrun_mutex_timeout() @test
{
TimedMutex m;
m.init()!!;
m.lock()!!;
if (try m.lock_timeout(20))
{
unreachable("lock_timeout should fail");
}
m.unlock()!!;
m.lock();
Thread t;
val_mutex = 0;
t.create(fn int(void* arg) {
TimedMutex* m = arg;
if (try m.lock_timeout(20))
{
val_mutex = 1;
}
else
{
val_mutex = 2;
}
return 0;
}, &m)!!;
t.join();
assert(val_mutex == 2);
m.unlock();
m.lock_timeout(20)!!;
m.unlock()!!;
m.unlock();
}
int x_once = 100;