Commit 8cbded11 authored by Ömer Sinan Ağacan, committed by Ben Gamari

rts: COMPACT_NFDATA support for the nonmoving collector

This largely follows the model used for large objects, with appropriate
adjustments made to account for references in the sharing deduplication
hashtable.
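In outline, the nonmoving heap gains a pair of doubly-linked block lists for compact regions, mirroring the large-object lists: marking moves a region from the unmarked list to the marked list, and sweeping frees whatever is still unmarked. A minimal sketch of that lifecycle, using the names introduced in the diff below (markCompactRegion is a hypothetical helper shown for illustration only, not part of the patch):

/* Sketch only: these two lists mirror nonmoving_large_objects /
 * nonmoving_marked_large_objects; the real definitions are in
 * NonMovingMark.c below. */
bdescr *nonmoving_compact_objects = NULL;        // not yet marked this cycle
bdescr *nonmoving_marked_compact_objects = NULL; // marked (live) this cycle

/* Marking (cf. mark_closure): move the region's head block onto the
 * marked list and set BF_MARKED so we don't mark it twice. */
static void markCompactRegion(StgCompactNFData *str)
{
    bdescr *bd = Bdescr((P_)str);
    dbl_link_remove(bd, &nonmoving_compact_objects);
    dbl_link_onto(bd, &nonmoving_marked_compact_objects);
    bd->flags |= BF_MARKED;
}

/* Sweeping (cf. nonmovingSweepCompactObjects): everything still on the
 * unmarked list is dead and is released with compactFree; the marked
 * list then becomes the live list for the next cycle. */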
parent 481a33b2
......@@ -276,7 +276,10 @@ compactFree(StgCompactNFData *str)
for ( ; block; block = next) {
next = block->next;
bd = Bdescr((StgPtr)block);
ASSERT((bd->flags & BF_EVACUATED) == 0);
ASSERT(RtsFlags.GcFlags.useNonmoving || ((bd->flags & BF_EVACUATED) == 0));
// When using the non-moving collector we leave compact objects
// evacuated to the oldest gen flagged as BF_EVACUATED to avoid
// evacuating objects in the non-moving heap.
freeGroup(bd);
}
}
......
......@@ -436,12 +436,22 @@ evacuate_compact (StgPtr p)
bd = Bdescr((StgPtr)str);
gen_no = bd->gen_no;
if (bd->flags & BF_NONMOVING) {
// We may have evacuated the block to the nonmoving generation. If so
// we need to make sure it is added to the mark queue since the only
// reference to it may be from the moving heap.
if (major_gc && !deadlock_detect_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) str);
return;
}
// already evacuated? (we're about to do the same check,
// but we avoid taking the spin-lock)
if (bd->flags & BF_EVACUATED) {
/* Don't forget to set the gct->failed_to_evac flag if we didn't get
* the desired destination (see comments in evacuate()).
*/
debugTrace(DEBUG_compact, "Compact %p already evacuated", str);
if (gen_no < gct->evac_gen_no) {
gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
......@@ -490,9 +500,15 @@ evacuate_compact (StgPtr p)
// for that - the only code touching the generation of the block is
// in the GC, and that should never see blocks other than the first)
bd->flags |= BF_EVACUATED;
if (RtsFlags.GcFlags.useNonmoving && new_gen == oldest_gen) {
bd->flags |= BF_NONMOVING;
}
initBdescr(bd, new_gen, new_gen->to);
if (str->hash) {
// If there is a hash-table for sharing preservation then we need to add
// the compact to the scavenging work list to ensure that the hashtable
// is scavenged.
gen_workspace *ws = &gct->gens[new_gen_no];
bd->link = ws->todo_large_objects;
ws->todo_large_objects = bd;
......@@ -658,13 +674,6 @@ loop:
// they are not)
if (bd->flags & BF_COMPACT) {
evacuate_compact((P_)q);
// We may have evacuated the block to the nonmoving generation. If so
// we need to make sure it is added to the mark queue since the only
// reference to it may be from the moving heap.
if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
}
return;
}
......
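Read together, the Evacuate.c hunks give evacuate_compact roughly the following nonmoving-aware shape (a condensed sketch for orientation, not the verbatim function; the elided parts are unchanged from the existing code):

static void evacuate_compact (StgPtr p)          // condensed sketch
{
    StgCompactNFData *str = objectGetCompact((StgClosure *)p);
    bdescr *bd = Bdescr((StgPtr)str);

    if (bd->flags & BF_NONMOVING) {
        // Already owned by the nonmoving heap: push it to the mark queue,
        // since the only reference to it may be from the moving heap.
        if (major_gc && !deadlock_detect_gc)
            markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *)str);
        return;
    }
    if (bd->flags & BF_EVACUATED) { /* ... failed-promotion handling ... */ return; }

    /* ... choose new_gen and relink the region's blocks as before ... */

    bd->flags |= BF_EVACUATED;
    if (RtsFlags.GcFlags.useNonmoving && new_gen == oldest_gen)
        bd->flags |= BF_NONMOVING;   // hand the region over to the nonmoving heap
    initBdescr(bd, new_gen, new_gen->to);

    if (str->hash) {
        /* ... queue on todo_large_objects so the sharing table is scavenged ... */
    }
}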
......@@ -751,6 +751,22 @@ static void nonmovingPrepareMark(void)
oldest_gen->n_large_blocks = 0;
nonmoving_live_words = 0;
// Move new compact objects from younger generations to nonmoving_compact_objects
for (bdescr *bd = oldest_gen->compact_objects; bd; bd = next) {
next = bd->link;
bd->flags |= BF_NONMOVING_SWEEPING;
dbl_link_onto(bd, &nonmoving_compact_objects);
}
n_nonmoving_compact_blocks += oldest_gen->n_compact_blocks;
oldest_gen->n_compact_blocks = 0;
oldest_gen->compact_objects = NULL;
// TODO (osa): what about "in import" stuff??
// Clear compact object mark bits
for (bdescr *bd = nonmoving_compact_objects; bd; bd = bd->link) {
bd->flags &= ~BF_MARKED;
}
// Clear large object bits
for (bdescr *bd = nonmoving_large_objects; bd; bd = bd->link) {
bd->flags &= ~BF_MARKED;
......@@ -811,6 +827,8 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
// N.B. These should have been cleared at the end of the last sweep.
ASSERT(nonmoving_marked_large_objects == NULL);
ASSERT(n_nonmoving_marked_large_blocks == 0);
ASSERT(nonmoving_marked_compact_objects == NULL);
ASSERT(n_nonmoving_marked_compact_blocks == 0);
MarkQueue *mark_queue = stgMallocBytes(sizeof(MarkQueue), "mark queue");
initMarkQueue(mark_queue);
......@@ -1057,6 +1075,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
// collect them in a map in mark_queue and we pass it here to sweep large
// objects
nonmovingSweepLargeObjects();
nonmovingSweepCompactObjects();
nonmovingSweepStableNameTable();
nonmovingSweep();
......
......@@ -25,6 +25,7 @@
#include "STM.h"
#include "MarkWeak.h"
#include "sm/Storage.h"
#include "CNF.h"
static void mark_closure (MarkQueue *queue, const StgClosure *p, StgClosure **origin);
static void mark_tso (MarkQueue *queue, StgTSO *tso);
......@@ -69,6 +70,12 @@ bdescr *nonmoving_large_objects = NULL;
bdescr *nonmoving_marked_large_objects = NULL;
memcount n_nonmoving_large_blocks = 0;
memcount n_nonmoving_marked_large_blocks = 0;
bdescr *nonmoving_compact_objects = NULL;
bdescr *nonmoving_marked_compact_objects = NULL;
memcount n_nonmoving_compact_blocks = 0;
memcount n_nonmoving_marked_compact_blocks = 0;
#if defined(THREADED_RTS)
/* Protects everything above. Furthermore, we only set the BF_MARKED bit of
* large object blocks when this is held. This ensures that the write barrier
......@@ -76,6 +83,9 @@ memcount n_nonmoving_marked_large_blocks = 0;
* move the same large object to nonmoving_marked_large_objects more than once.
*/
static Mutex nonmoving_large_objects_mutex;
// Note that we don't need a similar lock for compact objects because we never
// mark a compact object eagerly in a write barrier; all compact objects are
// marked by the mark thread, so there can't be any races here.
#endif
/*
......@@ -1249,9 +1259,22 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
ASSERT(!IS_FORWARDING_PTR(p->header.info));
if (bd->flags & BF_NONMOVING) {
// N.B. only the first block of a compact region is guaranteed to carry
// BF_NONMOVING; consequently we must separately check for BF_COMPACT.
if (bd->flags & (BF_COMPACT | BF_NONMOVING)) {
if (bd->flags & BF_LARGE) {
if (bd->flags & BF_COMPACT) {
StgCompactNFData *str = objectGetCompact((StgClosure*)p);
bd = Bdescr((P_)str);
if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
// Not in the snapshot
return;
}
if (bd->flags & BF_MARKED) {
goto done;
}
} else if (bd->flags & BF_LARGE) {
if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
// Not in the snapshot
goto done;
......@@ -1572,6 +1595,9 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
while (get_volatile_itbl(p)->type == WHITEHOLE);
goto try_again;
case COMPACT_NFDATA:
break;
default:
barf("mark_closure: unimplemented/strange closure type %d @ %p",
info->type, p);
......@@ -1583,7 +1609,15 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
* the object's pointers since in the case of marking stacks there may be a
* mutator waiting for us to finish so it can start execution.
*/
if (bd->flags & BF_LARGE) {
if (bd->flags & BF_COMPACT) {
StgCompactNFData *str = objectGetCompact((StgClosure*)p);
dbl_link_remove(bd, &nonmoving_compact_objects);
dbl_link_onto(bd, &nonmoving_marked_compact_objects);
StgWord blocks = str->totalW / BLOCK_SIZE_W;
n_nonmoving_compact_blocks -= blocks;
n_nonmoving_marked_compact_blocks += blocks;
bd->flags |= BF_MARKED;
} else if (bd->flags & BF_LARGE) {
/* Marking a large object isn't idempotent since we move it to
* nonmoving_marked_large_objects; to ensure that we don't repeatedly
* mark a large object, we only set BF_MARKED on large objects in the
......@@ -1704,7 +1738,11 @@ bool nonmovingIsAlive (StgClosure *p)
// BF_NONMOVING
ASSERT(bd->flags & BF_NONMOVING);
if (bd->flags & BF_LARGE) {
if (bd->flags & (BF_COMPACT | BF_LARGE)) {
if (bd->flags & BF_COMPACT) {
StgCompactNFData *str = objectGetCompact((StgClosure*)p);
bd = Bdescr((P_)str);
}
return (bd->flags & BF_NONMOVING_SWEEPING) == 0
// the large object wasn't in the snapshot and therefore wasn't marked
|| (bd->flags & BF_MARKED) != 0;
......
......@@ -128,8 +128,10 @@ typedef struct {
// The length of MarkQueueBlock.entries
#define MARK_QUEUE_BLOCK_ENTRIES ((MARK_QUEUE_BLOCKS * BLOCK_SIZE - sizeof(MarkQueueBlock)) / sizeof(MarkQueueEnt))
extern bdescr *nonmoving_large_objects, *nonmoving_marked_large_objects;
extern memcount n_nonmoving_large_blocks, n_nonmoving_marked_large_blocks;
extern bdescr *nonmoving_large_objects, *nonmoving_marked_large_objects,
*nonmoving_compact_objects, *nonmoving_marked_compact_objects;
extern memcount n_nonmoving_large_blocks, n_nonmoving_marked_large_blocks,
n_nonmoving_compact_blocks, n_nonmoving_marked_compact_blocks;
extern StgTSO *nonmoving_old_threads;
extern StgWeak *nonmoving_old_weak_ptr_list;
......
......@@ -337,6 +337,10 @@ nonmovingScavengeOne (StgClosure *q)
evacuate(&((StgInd *)p)->indirectee);
break;
case COMPACT_NFDATA:
scavenge_compact((StgCompactNFData*)p);
break;
default:
barf("nonmoving scavenge: unimplemented/strange closure type %d @ %p",
info->type, p);
......
......@@ -16,6 +16,7 @@
#include "Storage.h"
#include "Trace.h"
#include "StableName.h"
#include "CNF.h" // compactFree
// On which list should a particular segment be placed?
enum SweepResult {
......@@ -301,6 +302,21 @@ void nonmovingSweepLargeObjects()
n_nonmoving_marked_large_blocks = 0;
}
void nonmovingSweepCompactObjects()
{
bdescr *next;
ACQUIRE_SM_LOCK;
for (bdescr *bd = nonmoving_compact_objects; bd; bd = next) {
next = bd->link;
compactFree(((StgCompactNFDataBlock*)bd->start)->owner);
}
RELEASE_SM_LOCK;
nonmoving_compact_objects = nonmoving_marked_compact_objects;
n_nonmoving_compact_blocks = n_nonmoving_marked_compact_blocks;
nonmoving_marked_compact_objects = NULL;
n_nonmoving_marked_compact_blocks = 0;
}
// Helper for nonmovingSweepStableNameTable. Essentially nonmovingIsAlive,
// but works when the object died in moving heap, see
// nonmovingSweepStableNameTable
......
......@@ -19,6 +19,9 @@ void nonmovingSweepMutLists(void);
// Remove unmarked entries in oldest generation scavenged_large_objects list
void nonmovingSweepLargeObjects(void);
// Remove unmarked entries in oldest generation compact_objects list
void nonmovingSweepCompactObjects(void);
// Remove dead entries in the stable name table
void nonmovingSweepStableNameTable(void);
......
......@@ -820,9 +820,33 @@ static void checkGeneration (generation *gen,
#endif
if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
ASSERT(countNonMovingSegments(nonmovingHeap.free) == (W_) nonmovingHeap.n_free * NONMOVING_SEGMENT_BLOCKS);
ASSERT(countBlocks(nonmoving_large_objects) == n_nonmoving_large_blocks);
ASSERT(countBlocks(nonmoving_marked_large_objects) == n_nonmoving_marked_large_blocks);
ASSERT(countNonMovingSegments(nonmovingHeap.free) == (W_) nonmovingHeap.n_free * NONMOVING_SEGMENT_BLOCKS);
// Compact regions
// Accounting here is tricky due to the fact that the CNF allocation
// code modifies generation->n_compact_blocks directly. However, most
// objects being swept by the nonmoving GC are tracked in
// nonmoving_*_compact_objects. Consequently we can only maintain a very loose
// sanity invariant here.
uint32_t counted_cnf_blocks = 0;
counted_cnf_blocks += countCompactBlocks(nonmoving_marked_compact_objects);
counted_cnf_blocks += countCompactBlocks(nonmoving_compact_objects);
counted_cnf_blocks += countCompactBlocks(oldest_gen->compact_objects);
uint32_t total_cnf_blocks = 0;
total_cnf_blocks += n_nonmoving_compact_blocks + oldest_gen->n_compact_blocks;
total_cnf_blocks += n_nonmoving_marked_compact_blocks;
if (counted_cnf_blocks != total_cnf_blocks) {
debugBelch("count(nonmoving_compact_objects = %ld\n", countCompactBlocks(nonmoving_compact_objects));
debugBelch("n_nonmoving_compact_blocks = %ld\n", n_nonmoving_compact_blocks);
debugBelch("oldest_gen->n_compact_blocks = %ld\n", oldest_gen->n_compact_blocks);
debugBelch("count(nonmoving_marked_compact_objects = %ld\n", countCompactBlocks(nonmoving_marked_compact_objects));
debugBelch("n_nonmoving_marked_compact_blocks = %ld\n", n_nonmoving_marked_compact_blocks);
}
ASSERT(counted_cnf_blocks == total_cnf_blocks);
}
checkHeapChain(gen->blocks);
......@@ -919,6 +943,8 @@ findMemoryLeak (void)
markBlocks(upd_rem_set_block_list);
markBlocks(nonmoving_large_objects);
markBlocks(nonmoving_marked_large_objects);
markBlocks(nonmoving_compact_objects);
markBlocks(nonmoving_marked_compact_objects);
for (i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
markNonMovingSegments(alloc->filled);
......@@ -997,17 +1023,19 @@ genBlocks (generation *gen)
ASSERT(countNonMovingHeap(&nonmovingHeap) == gen->n_blocks);
ret += countAllocdBlocks(nonmoving_large_objects);
ret += countAllocdBlocks(nonmoving_marked_large_objects);
ret += countAllocdCompactBlocks(nonmoving_compact_objects);
ret += countAllocdCompactBlocks(nonmoving_marked_compact_objects);
ret += countNonMovingHeap(&nonmovingHeap);
if (current_mark_queue)
ret += countBlocks(current_mark_queue->blocks);
} else {
ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
ret += gen->n_blocks;
}
ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
ret += gen->n_old_blocks +
countAllocdBlocks(gen->large_objects) +
......
......@@ -84,6 +84,7 @@ static void scavenge_large_bitmap (StgPtr p,
# define scavenge_mut_arr_ptrs(info) scavenge_mut_arr_ptrs1(info)
# define scavenge_PAP(pap) scavenge_PAP1(pap)
# define scavenge_AP(ap) scavenge_AP1(ap)
# define scavenge_compact(str) scavenge_compact1(str)
#endif
static void do_evacuate(StgClosure **p, void *user STG_UNUSED)
......@@ -173,7 +174,10 @@ evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
SET_GCT(old_gct);
}
static void
/* Here we scavenge the sharing-preservation hash-table, which may contain keys
* living in from-space.
*/
void
scavenge_compact(StgCompactNFData *str)
{
bool saved_eager;
......
......@@ -24,6 +24,7 @@ void scavenge_thunk_srt (const StgInfoTable *info);
StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a);
StgPtr scavenge_PAP (StgPAP *pap);
StgPtr scavenge_AP (StgAP *ap);
void scavenge_compact (StgCompactNFData *str);
#if defined(THREADED_RTS)
void scavenge_loop1 (void);
......@@ -35,6 +36,7 @@ void scavenge_thunk_srt1 (const StgInfoTable *info);
StgPtr scavenge_mut_arr_ptrs1 (StgMutArrPtrs *a);
StgPtr scavenge_PAP1 (StgPAP *pap);
StgPtr scavenge_AP1 (StgAP *ap);
void scavenge_compact1 (StgCompactNFData *str);
#endif
#include "EndPrivate.h"