Commit 9d909b3b authored by Simon Marlow

implement clean/dirty TSOs

Along the lines of the clean/dirty arrays and IORefs implemented
recently, now threads are marked clean or dirty depending on whether
they need to be scanned during a minor GC or not.  This should speed
up GC when there are lots of threads, especially if most of them are
idle.
parent 38014860
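
For orientation, here is a minimal sketch of the protocol this commit implements. The names below (Thread, dirty_thread, minor_gc_scan_thread) are simplified stand-ins for illustration, not the RTS's real definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define TSO_DIRTY 1                  /* same flag value the commit defines */

    /* Simplified stand-in for StgTSO: only the fields the sketch needs. */
    typedef struct Thread_ {
        struct Thread_ *link;            /* the GC always follows this field */
        uint32_t        flags;
    } Thread;

    /* Mutator side: mark the thread dirty before its stack can be written
     * (run by the scheduler, hit by an async exception, resumed after a
     * foreign call). */
    static void dirty_thread(Thread *t)
    {
        t->flags |= TSO_DIRTY;
    }

    /* GC side: a clean thread's stack is skipped entirely; a dirty one is
     * scanned, and becomes clean again if the scan finds no pointers into
     * a young generation (modelled by the second argument here). */
    static void minor_gc_scan_thread(Thread *t, bool points_into_young_gen)
    {
        if ((t->flags & TSO_DIRTY) == 0)
            return;                      /* clean: skip the stack walk */
        /* ... traverse the stack, evacuating what it points to ... */
        if (points_into_young_gen)
            t->flags |= TSO_DIRTY;       /* still dirty */
        else
            t->flags &= ~TSO_DIRTY;      /* clean until the next mutation */
    }

With many mostly-idle threads, each minor GC then touches only the few stacks that actually changed since the last collection, which is where the speedup claimed in the commit message comes from.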
@@ -77,6 +77,22 @@ typedef StgTSOStatBuf StgTSOGranInfo;
  */
 typedef StgWord32 StgThreadID;
 
+/*
+ * Flags for the tso->flags field.
+ *
+ * The TSO_DIRTY flag indicates that this TSO's stack should be
+ * scanned during garbage collection.  The link field of a TSO is
+ * always scanned, so we don't have to dirty a TSO just for linking
+ * it on a different list.
+ *
+ * TSO_DIRTY is set by
+ *    - schedule(), just before running a thread,
+ *    - raiseAsync(), because it modifies a thread's stack
+ *    - resumeThread(), just before running the thread again
+ * and unset by the garbage collector (only).
+ */
+#define TSO_DIRTY   1
+
 /*
  * Type returned after running a thread.  Values of this type
  * include HeapOverflow, StackOverflow etc.  See Constants.h for the
@@ -123,8 +139,9 @@ typedef struct StgTSO_ {
   struct StgTSO_*       link;          /* Links threads onto blocking queues */
   struct StgTSO_*       global_link;   /* Links all threads together */
 
-  StgWord16             what_next;     /* Values defined in Constants.h */
-  StgWord16             why_blocked;   /* Values defined in Constants.h */
+  StgWord16             what_next;     /* Values defined in Constants.h */
+  StgWord16             why_blocked;   /* Values defined in Constants.h */
+  StgWord32             flags;
   StgTSOBlockInfo       block_info;
   struct StgTSO_*       blocked_exceptions;
   StgThreadID           id;
......
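
The "link field is always scanned" rule in the comment above is what keeps queue manipulation cheap. A hedged illustration (push_front is a hypothetical helper, reusing the simplified Thread type from the sketch above):

    /* Moving a thread to another queue writes only tso->link, which the
     * GC follows unconditionally, so the thread can stay clean. */
    static void push_front(Thread **queue, Thread *tso)
    {
        tso->link = *queue;              /* no dirty_thread(tso) needed */
        *queue = tso;
    }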
......@@ -3004,10 +3004,19 @@ scavenge(step *stp)
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-        evac_gen = 0;
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue; // mutable anyhow.
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         p += tso_sizeW(tso);
         break;
     }
......@@ -3388,10 +3397,19 @@ linear_scan:
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-        evac_gen = 0;
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         break;
     }
......@@ -3731,11 +3749,19 @@ scavenge_one(StgPtr p)
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-        evac_gen = 0;   // repeatedly mutable
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         break;
     }
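
All three scavenging paths above (scavenge, the mark-compact linear scan, and scavenge_one) apply the same bookkeeping. After scavengeTSO, failed_to_evac reports whether something reachable from the TSO could not be evacuated out of a young generation; if so the TSO stays dirty, otherwise it is marked clean. The final unconditional failed_to_evac = rtsTrue is a separate concern: it keeps the TSO on the mutable list either way, because, as the scavenge_mutable_list comment below spells out, TSOs live on that list permanently and the dirty bit only decides whether their stacks get traversed.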
......@@ -3935,17 +3961,38 @@ scavenge_mutable_list(generation *gen)
         }
 #endif
 
-        // We don't need to scavenge clean arrays.  This is the
-        // Whole Point of MUT_ARR_PTRS_CLEAN.
-        if (get_itbl((StgClosure *)p)->type == MUT_ARR_PTRS_CLEAN) {
+        // Check whether this object is "clean", that is it
+        // definitely doesn't point into a young generation.
+        // Clean objects don't need to be scavenged.  Some clean
+        // objects (MUT_VAR_CLEAN) are not kept on the mutable
+        // list at all; others, such as MUT_ARR_PTRS_CLEAN and
+        // TSO, are always on the mutable list.
+        //
+        switch (get_itbl((StgClosure *)p)->type) {
+        case MUT_ARR_PTRS_CLEAN:
             recordMutableGen((StgClosure *)p,gen);
             continue;
-        }
+        case TSO: {
+            StgTSO *tso = (StgTSO *)p;
+            if ((tso->flags & TSO_DIRTY) == 0) {
+                // A clean TSO: we don't have to traverse its
+                // stack.  However, we *do* follow the link field:
+                // we don't want to have to mark a TSO dirty just
+                // because we put it on a different queue.
+                if (tso->why_blocked != BlockedOnBlackHole) {
+                    tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+                }
+                recordMutableGen((StgClosure *)p,gen);
+                continue;
+            }
+        }
+        default:
+            ;
+        }
+
         if (scavenge_one(p)) {
-            /* didn't manage to promote everything, so put the
-             * object back on the list.
-             */
+            // didn't manage to promote everything, so put the
+            // object back on the list.
             recordMutableGen((StgClosure *)p,gen);
         }
     }
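
Note the asymmetry the new comment describes: a clean MUT_VAR is dropped from the mutable list altogether, whereas clean arrays and clean TSOs are re-recorded on every collection, with cleanliness saving only the scan. For a clean TSO the link field is still evacuated unless the thread is BlockedOnBlackHole; presumably such threads are chained on the black-hole queue, which is handled separately, though the hunk itself doesn't say.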
......
......@@ -564,6 +564,8 @@ run_thread:
     errno = t->saved_errno;
     cap->in_haskell = rtsTrue;
 
+    dirtyTSO(t);
+
     recent_activity = ACTIVITY_YES;
 
     switch (prev_what_next) {
......@@ -2248,6 +2250,9 @@ resumeThread (void *task_)
     cap->in_haskell = rtsTrue;
     errno = saved_errno;
 
+    /* We might have GC'd, mark the TSO dirty again */
+    dirtyTSO(tso);
+
     return &cap->r;
 }
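
The comment here points at a subtle window: while a thread is suspended in a foreign call, a GC can run, find no young-generation pointers on its stack, and mark it clean. The Haskell code that is about to resume may immediately write new young pointers into that stack, so the flag has to be set again before execution continues.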
......@@ -2361,6 +2366,7 @@ createThread(Capability *cap, nat size)
     tso->why_blocked  = NotBlocked;
     tso->blocked_exceptions = NULL;
+    tso->flags = TSO_DIRTY;
 
     tso->saved_errno = 0;
     tso->bound = NULL;
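
New threads are created dirty, which is the conservative choice: createThread has just written the initial stack frames, and the thread will in any case be dirtied by schedule() the first time it runs.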
......@@ -3652,6 +3658,9 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
     // Remove it from any blocking queues
     unblockThread(cap,tso);
 
+    // mark it dirty; we're about to change its stack.
+    dirtyTSO(tso);
+
     sp = tso->sp;
 
     // The stack freezing code assumes there's a closure pointer on
......
......@@ -288,6 +288,12 @@ emptyThreadQueues(Capability *cap)
     ;
 }
 
+STATIC_INLINE void
+dirtyTSO (StgTSO *tso)
+{
+    tso->flags |= TSO_DIRTY;
+}
+
 #ifdef DEBUG
 void sched_belch(char *s, ...)
    GNU_ATTRIBUTE(format (printf, 1, 2));
......
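
To close the loop, a tiny usage example of the model sketched under the commit message. It is illustrative only, and assumes the Thread, dirty_thread, and minor_gc_scan_thread definitions from that sketch:

    #include <assert.h>

    int main(void)
    {
        Thread t = { .link = 0, .flags = TSO_DIRTY };   /* created dirty */

        minor_gc_scan_thread(&t, false);    /* GC scans it, proves it clean */
        assert((t.flags & TSO_DIRTY) == 0);

        minor_gc_scan_thread(&t, false);    /* clean: the stack walk is skipped */
        assert((t.flags & TSO_DIRTY) == 0);

        dirty_thread(&t);                   /* scheduler runs it again */
        assert(t.flags & TSO_DIRTY);
        return 0;
    }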