Commit 32907722 authored by Simon Marlow

Remove the per-generation mutable lists

Now that we use the per-capability mutable lists exclusively, the per-generation lists are redundant.
parent 0b22a782
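
The key consequence is visible in the final hunk below: the generation-indexed recordMutableGen family, which needed the global sm_mutex in the threaded RTS, disappears. Mutation is instead recorded on the mutating capability's own list, with no global lock. A minimal sketch of that per-capability write-barrier helper, assuming the recordMutableCap shape the RTS used around this time (approximate, not the verbatim source):

INLINE_HEADER void
recordMutableCap (StgClosure *p, Capability *cap, nat gen_no)
{
    bdescr *bd;

    // The capability owns one mutable list per generation, so no
    // lock is needed: only this capability touches the list.
    bd = cap->mut_lists[gen_no];
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
        // current block is full: chain on a fresh one
        bdescr *new_bd;
        new_bd = allocBlock_lock();  // the block allocator keeps its own lock
        new_bd->link = bd;
        bd = new_bd;
        cap->mut_lists[gen_no] = bd;
    }
    *bd->free++ = (StgWord)p;        // remember p as a mutable root
}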
@@ -245,7 +245,6 @@ main(int argc, char *argv[])
     struct_field(bdescr, link);
     struct_size(generation);
-    struct_field(generation, mut_list);
     struct_field(generation, n_new_large_words);
     struct_size(CostCentreStack);
......
@@ -71,7 +71,6 @@ typedef struct generation_ {
                                    // (for allocation stats)
     unsigned int max_blocks;       // max blocks
-    bdescr *mut_list;              // mut objects in this gen (not G0)
     StgTSO * threads;              // threads in this gen
                                    // linked via global_link
@@ -102,8 +101,6 @@ typedef struct generation_ {
     unsigned int n_old_blocks;     // number of blocks in from-space
     unsigned int live_estimate;    // for sweeping: estimate of live data
-    bdescr * saved_mut_list;
-
     bdescr * part_blocks;          // partially-full scanned blocks
     unsigned int n_part_blocks;    // count of above
......
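
Both fields removed from struct generation_ above have per-capability counterparts. Roughly (a sketch of the Capability layout of this period, surrounding fields elided; field names as in rts/Capability.h):

struct Capability_ {
    /* ... */
    // One mutable list per generation, indexed by gen_no.  Gen 0
    // needs no remembered set (see "not G0" above), so its entry
    // goes unused.
    bdescr **mut_lists;
    // Where each mut_lists[g] is stashed while the GC scavenges it
    // (filled in by init_uncollected_gen, below).
    bdescr **saved_mut_lists;
    /* ... */
};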
@@ -771,7 +771,7 @@ stat_exit(int alloc)
 void
 statDescribeGens(void)
 {
-  nat g, mut, lge;
+  nat g, mut, lge, i;
   lnat live, slop;
   lnat tot_live, tot_slop;
   bdescr *bd;
@@ -787,8 +787,8 @@ statDescribeGens(void)
   tot_slop = 0;
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
       mut = 0;
-      for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
-          mut += (bd->free - bd->start) * sizeof(W_);
+      for (i = 0; i < n_capabilities; i++) {
+          mut += countOccupied(capabilities[i].mut_lists[g]);
       }
       gen = &generations[g];
......
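
countOccupied, used above in place of the hand-rolled loop, totals the words in use across a chain of blocks. A behavioural sketch (assumed to match rts/sm/Storage.c; note the deleted loop scaled its count to bytes with sizeof(W_), so the reporting site presumably now applies that scaling instead):

lnat
countOccupied (bdescr *bd)   // sketch, not verbatim
{
    lnat words = 0;
    for (; bd != NULL; bd = bd->link) {
        words += bd->free - bd->start;   // words used in this block
    }
    return words;
}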
@@ -954,11 +954,6 @@ compact(StgClosure *static_objects)
         bdescr *bd;
         StgPtr p;
         nat n;
-        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
-            for (p = bd->start; p < bd->free; p++) {
-                thread((StgClosure **)p);
-            }
-        }
         for (n = 0; n < n_capabilities; n++) {
             for (bd = capabilities[n].mut_lists[g];
                  bd != NULL; bd = bd->link) {
......
@@ -327,27 +327,6 @@ SET_GCT(gc_threads[0]);
   inc_running();
   wakeup_gc_threads(n_gc_threads, gct->thread_index);
-  // Mutable lists from each generation > N
-  // we want to *scavenge* these roots, not evacuate them: they're not
-  // going to move in this GC.
-  // Also do them in reverse generation order, for the usual reason:
-  // namely to reduce the likelihood of spurious old->new pointers.
-  //
-  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
-#if defined(THREADED_RTS)
-      if (n_gc_threads > 1) {
-          scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
-      } else {
-          scavenge_mutable_list1(generations[g].saved_mut_list, &generations[g]);
-      }
-#else
-      scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
-#endif
-      freeChain_sync(generations[g].saved_mut_list);
-      generations[g].saved_mut_list = NULL;
-  }
   // scavenge the capability-private mutable lists.  This isn't part
   // of markSomeCapabilities() because markSomeCapabilities() can only
   // call back into the GC via mark_root() (due to the gct register
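
The deleted block walked each generations[g].saved_mut_list; its job moves to the per-capability traversal referred to by the surviving comment. The shape of the replacement, assuming scavenge_mutable_list keeps the (bdescr *, generation *) signature seen in the deleted code (a sketch of the structure, not the exact call path):

for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    for (n = 0; n < n_capabilities; n++) {
        // still oldest-first, to reduce spurious old->new pointers
        scavenge_mutable_list(capabilities[n].saved_mut_lists[g],
                              &generations[g]);
        freeChain_sync(capabilities[n].saved_mut_lists[g]);
        capabilities[n].saved_mut_lists[g] = NULL;
    }
}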
@@ -557,14 +536,8 @@ SET_GCT(gc_threads[0]);
       // stats.  Every mutable list is copied during every GC.
       if (g > 0) {
           nat mut_list_size = 0;
-          for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
-              mut_list_size += bd->free - bd->start;
-          }
           for (n = 0; n < n_capabilities; n++) {
-              for (bd = capabilities[n].mut_lists[g];
-                   bd != NULL; bd = bd->link) {
-                  mut_list_size += bd->free - bd->start;
-              }
+              mut_list_size += countOccupied(capabilities[n].mut_lists[g]);
           }
           copied += mut_list_size;
@@ -1235,9 +1208,7 @@ init_collected_gen (nat g, nat n_threads)
     // list always has at least one block; this means we can avoid a
     // check for NULL in recordMutable().
     if (g != 0) {
-        freeChain(generations[g].mut_list);
-        generations[g].mut_list = allocBlock();
-        for (i = 0; i < n_capabilities; i++) {
+        for (i = 0; i < n_capabilities; i++) {
             freeChain(capabilities[i].mut_lists[g]);
             capabilities[i].mut_lists[g] = allocBlock();
         }
@@ -1356,8 +1327,6 @@ init_uncollected_gen (nat g, nat threads)
     // save the current mutable lists for this generation, and
     // allocate a fresh block for each one.  We'll traverse these
     // mutable lists as roots early on in the GC.
-    generations[g].saved_mut_list = generations[g].mut_list;
-    generations[g].mut_list = allocBlock();
     for (n = 0; n < n_capabilities; n++) {
         capabilities[n].saved_mut_lists[g] = capabilities[n].mut_lists[g];
         capabilities[n].mut_lists[g] = allocBlock();
......
@@ -294,14 +294,13 @@ alloc_todo_block (gen_workspace *ws, nat size)
 #if DEBUG
 void
-printMutableList(generation *gen)
+printMutableList(bdescr *bd)
 {
-    bdescr *bd;
     StgPtr p;
-    debugBelch("mutable list %p: ", gen->mut_list);
-    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
+    debugBelch("mutable list %p: ", bd);
+    for (; bd != NULL; bd = bd->link) {
         for (p = bd->start; p < bd->free; p++) {
             debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
         }
......
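
With the new signature, callers hand printMutableList a block chain directly rather than a generation, so it works on any of the per-capability lists. A hypothetical debugging call:

// dump what capability i has recorded as mutable in generation g
printMutableList(capabilities[i].mut_lists[g]);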
@@ -39,7 +39,7 @@ isPartiallyFull(bdescr *bd)
 #if DEBUG
-void printMutableList (generation *gen);
+void printMutableList (bdescr *bd);
 #endif
 // Version of recordMutableGen for use during GC.  This uses the
......
@@ -619,7 +619,6 @@ checkMutableLists (rtsBool checkTSOs)
     nat g, i;
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-        checkMutableList(generations[g].mut_list, g);
         for (i = 0; i < n_capabilities; i++) {
             checkMutableList(capabilities[i].mut_lists[g], g);
         }
@@ -738,7 +737,6 @@ findMemoryLeak (void)
       for (i = 0; i < n_capabilities; i++) {
          markBlocks(capabilities[i].mut_lists[g]);
      }
-      markBlocks(generations[g].mut_list);
      markBlocks(generations[g].blocks);
      markBlocks(generations[g].large_objects);
  }
@@ -826,7 +824,6 @@ memInventory (rtsBool show)
      for (i = 0; i < n_capabilities; i++) {
          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
      }
-     gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
      gen_blocks[g] += genBlocks(&generations[g]);
  }
......
@@ -78,7 +78,6 @@ initGeneration (generation *gen, int g)
    gen->large_objects = NULL;
    gen->n_large_blocks = 0;
    gen->n_new_large_words = 0;
-    gen->mut_list = allocBlock();
    gen->scavenged_large_objects = NULL;
    gen->n_scavenged_large_blocks = 0;
    gen->mark = 0;
......
@@ -38,11 +38,6 @@ bdescr * splitLargeBlock (bdescr *bd, nat blocks);
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
-      recordMutable(StgPtr p)       Informs the garbage collector that a
-                                    previously immutable object has
-                                    become (permanently) mutable.  Used
-                                    by thawArray and similar.
       updateWithIndirection(p1,p2)  Updates the object at p1 with an
                                     indirection pointing to p2.  This is
                                     normally called for objects in an old
@@ -69,48 +64,6 @@ extern Mutex sm_mutex;
 #define ASSERT_SM_LOCK()
 #endif
 
-INLINE_HEADER void
-recordMutableGen(StgClosure *p, nat gen_no)
-{
-    bdescr *bd;
-
-    bd = generations[gen_no].mut_list;
-    if (bd->free >= bd->start + BLOCK_SIZE_W) {
-        bdescr *new_bd;
-        new_bd = allocBlock();
-        new_bd->link = bd;
-        bd = new_bd;
-        generations[gen_no].mut_list = bd;
-    }
-    *bd->free++ = (StgWord)p;
-}
-
-INLINE_HEADER void
-recordMutableGenLock(StgClosure *p, nat gen_no)
-{
-    ACQUIRE_SM_LOCK;
-    recordMutableGen(p,gen_no);
-    RELEASE_SM_LOCK;
-}
-
-INLINE_HEADER void
-recordMutable(StgClosure *p)
-{
-    bdescr *bd;
-
-    ASSERT(closure_MUTABLE(p));
-
-    bd = Bdescr((P_)p);
-    if (bd->gen_no > 0) recordMutableGen(p, bd->gen_no);
-}
-
-INLINE_HEADER void
-recordMutableLock(StgClosure *p)
-{
-    ACQUIRE_SM_LOCK;
-    recordMutable(p);
-    RELEASE_SM_LOCK;
-}
-
 /* -----------------------------------------------------------------------------
    The write barrier for MVARs
    -------------------------------------------------------------------------- */
......
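
For comparison with the deleted functions: after this change a write barrier records on the running capability's list without touching sm_mutex. A sketch of a dirty_MUT_VAR-style barrier using the recordMutableCap helper sketched under the commit message (dirty-bit handling simplified; not the verbatim RTS source):

void
dirty_MUT_VAR (StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
        // first mutation since the last GC: mark dirty and remember it
        bdescr *bd = Bdescr((StgPtr)p);
        p->header.info = &stg_MUT_VAR_DIRTY_info;
        // gen-0 objects need no remembered-set entry, mirroring the
        // gen_no > 0 check in the deleted recordMutable()
        if (bd->gen_no > 0) recordMutableCap(p, cap, bd->gen_no);
    }
    // already dirty: it is on a mutable list, nothing to do
}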