Commit 77798610 authored by simonmarhaskell@gmail.com's avatar simonmarhaskell@gmail.com
Browse files

Use the BF_EVACUATED flag to indicate to-space consistently

BF_EVACUATED is now set on all blocks except those that we are
copying.  This means we don't need a separate test for gen>N in
evacuate(), because in generations older than N, BF_EVACUATED will be
set anyway.  The disadvantage is that we have to reset the
BF_EVACUATED flag on the blocks of any generation we're collecting
before starting GC.  Results in a small speed improvement.
parent 49780c2e
......@@ -83,11 +83,8 @@ thread (StgClosure **p)
if (HEAP_ALLOCED(q)) {
bd = Bdescr(q);
// a handy way to discover whether the ptr is into the
// compacted area of the old gen, is that the EVACUATED flag
// is zero (it's non-zero for all the other areas of live
// memory).
if ((bd->flags & BF_EVACUATED) == 0)
if (bd->flags & BF_COMPACTED)
{
iptr = *q;
switch (GET_CLOSURE_TAG((StgClosure *)iptr))
......
......@@ -228,7 +228,7 @@ selector_chain:
// save any space in any case, and updating with an indirection is
// trickier in a non-collected gen: we would have to update the
// mutable list.
if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
if (bd->flags & BF_EVACUATED) {
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
*q = (StgClosure *)p;
return;
......
......@@ -305,28 +305,18 @@ loop:
bd = Bdescr((P_)q);
if (bd->gen_no > N) {
/* Can't evacuate this object, because it's in a generation
* older than the ones we're collecting. Let's hope that it's
* in gct->evac_step or older, or we will have to arrange to track
* this pointer using the mutable list.
*/
if (bd->step < gct->evac_step) {
// nope
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return;
}
if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
/* pointer into to-space: just return it. This normally
* shouldn't happen, but allowing it makes certain things
* slightly easier (eg. the mutable list can contain the same
* object twice, for example).
*/
// pointer into to-space: just return it. It might be a pointer
// into a generation that we aren't collecting (> N), or it
// might just be a pointer into to-space. The latter doesn't
// happen often, but allowing it makes certain things a bit
// easier; e.g. scavenging an object is idempotent, so it's OK to
// have an object on the mutable list multiple times.
if (bd->flags & BF_EVACUATED) {
// We aren't copying this object, so we have to check
// whether it is already in the target generation. (this is
// the write barrier).
if (bd->step < gct->evac_step) {
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
......
......@@ -438,7 +438,6 @@ GarbageCollect ( rtsBool force_major_gc )
prev = NULL;
for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
bd->flags &= ~BF_EVACUATED; // now from-space
ws->step->n_words += bd->free - bd->start;
prev = bd;
}
......@@ -460,7 +459,6 @@ GarbageCollect ( rtsBool force_major_gc )
freeGroup(bd);
ws->n_part_blocks--;
} else {
bd->flags &= ~BF_EVACUATED; // now from-space
ws->step->n_words += bd->free - bd->start;
prev = bd;
}
......@@ -543,7 +541,6 @@ GarbageCollect ( rtsBool force_major_gc )
// for a compacted step, just shift the new to-space
// onto the front of the now-compacted existing blocks.
for (bd = stp->blocks; bd != NULL; bd = bd->link) {
bd->flags &= ~BF_EVACUATED; // now from-space
stp->n_words += bd->free - bd->start;
}
// tack the new blocks on the end of the existing blocks
......@@ -585,10 +582,6 @@ GarbageCollect ( rtsBool force_major_gc )
bd = next;
}
// update the count of blocks used by large objects
for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
bd->flags &= ~BF_EVACUATED;
}
stp->large_objects = stp->scavenged_large_objects;
stp->n_large_blocks = stp->n_scavenged_large_blocks;
......@@ -601,7 +594,6 @@ GarbageCollect ( rtsBool force_major_gc )
*/
for (bd = stp->scavenged_large_objects; bd; bd = next) {
next = bd->link;
bd->flags &= ~BF_EVACUATED;
dbl_link_onto(bd, &stp->large_objects);
}
......@@ -1095,7 +1087,12 @@ init_collected_gen (nat g, nat n_threads)
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
// mark the large objects as not evacuated yet
// mark the small objects as from-space
for (bd = stp->old_blocks; bd; bd = bd->link) {
bd->flags &= ~BF_EVACUATED;
}
// mark the large objects as from-space
for (bd = stp->large_objects; bd; bd = bd->link) {
bd->flags &= ~BF_EVACUATED;
}
......
......@@ -55,9 +55,6 @@ isAlive(StgClosure *p)
// ignore closures in generations that we're not collecting.
bd = Bdescr((P_)q);
if (bd->gen_no > N) {
return p;
}
// if it's a pointer into to-space, then we're done
if (bd->flags & BF_EVACUATED) {
......
......@@ -205,7 +205,6 @@ StgPtr
alloc_todo_block (step_workspace *ws, nat size)
{
bdescr *bd, *hd, *tl;
StgWord32 flags;
// Grab a part block if we have one, and it has enough room
if (ws->part_list != NULL &&
......@@ -217,15 +216,8 @@ alloc_todo_block (step_workspace *ws, nat size)
}
else
{
// blocks in to-space in generations up to and including N
// get the BF_EVACUATED flag.
if (ws->step->gen_no <= N) {
flags = BF_EVACUATED;
} else {
flags = 0;
}
allocBlocks_sync(4, &hd, &tl,
ws->step->gen_no, ws->step, flags);
ws->step->gen_no, ws->step, BF_EVACUATED);
tl->link = ws->part_list;
ws->part_list = hd->link;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment