Commit c2d428ba authored by simonmar
Browse files

[project @ 2005-05-05 11:35:29 by simonmar]

Treat the blackhole_queue as a weak list: it shouldn't keep its
elements alive, because otherwise we can't detect deadlock using the
GC.  Fortunately the fix is quite simple.
parent 8ffe6eb9
......@@ -1294,6 +1294,21 @@ traverse_weak_ptr_list(void)
}
}
/* Finally, we can update the blackhole_queue. This queue
* simply strings together TSOs blocked on black holes, it is
* not intended to keep anything alive. Hence, we do not follow
* pointers on the blackhole_queue until now, when we have
* determined which TSOs are otherwise reachable. We know at
* this point that all TSOs have been evacuated, however.
*/
{
StgTSO **pt;
for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
*pt = (StgTSO *)isAlive((StgClosure *)*pt);
ASSERT(*pt != NULL);
}
}
weak_stage = WeakDone; // *now* we're done,
return rtsTrue; // but one more round of scavenging, please
......@@ -2356,8 +2371,10 @@ scavenge_fun_srt(const StgInfoTable *info)
static void
scavengeTSO (StgTSO *tso)
{
// chase the link field for any TSOs on the same queue
tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
// We don't chase the link field: TSOs on the blackhole queue are
// not automatically alive, so the link field is a "weak" pointer.
// Queues of TSOs are traversed explicitly.
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
......
......@@ -2745,6 +2745,16 @@ waitThread_(StgMainThread* m, Capability *initialCapability)
KH @ 25/10/99
*/
/* Walk a queue of TSOs chained through their link fields, applying the
 * GC's evacuation function to each queue cell in turn.  Taking the
 * address of each link slot lets evac update the pointer in place as
 * the TSOs move.
 */
static void
evac_TSO_queue (evac_fn evac, StgTSO ** ptso)
{
    StgTSO **cursor = ptso;
    while (*cursor != END_TSO_QUEUE) {
        evac((StgClosure **)cursor);
        cursor = &((*cursor)->link);
    }
}
void
GetRoots( evac_fn evac )
{
......@@ -2769,29 +2779,29 @@ GetRoots( evac_fn evac )
markEventQueue();
#else /* !GRAN */
if (run_queue_hd != END_TSO_QUEUE) {
ASSERT(run_queue_tl != END_TSO_QUEUE);
evac((StgClosure **)&run_queue_hd);
evac_TSO_queue(evac, &run_queue_hd);
evac((StgClosure **)&run_queue_tl);
}
if (blocked_queue_hd != END_TSO_QUEUE) {
ASSERT(blocked_queue_tl != END_TSO_QUEUE);
evac((StgClosure **)&blocked_queue_hd);
evac_TSO_queue(evac, &blocked_queue_hd);
evac((StgClosure **)&blocked_queue_tl);
}
if (sleeping_queue != END_TSO_QUEUE) {
evac((StgClosure **)&sleeping_queue);
evac_TSO_queue(evac, &sleeping_queue);
}
#endif
if (blackhole_queue != END_TSO_QUEUE) {
evac((StgClosure **)&blackhole_queue);
}
// Don't chase the blackhole_queue just yet, we treat it as "weak"
if (suspended_ccalling_threads != END_TSO_QUEUE) {
evac((StgClosure **)&suspended_ccalling_threads);
evac_TSO_queue(evac, &suspended_ccalling_threads);
}
#if defined(PARALLEL_HASKELL) || defined(GRAN)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment