Commit 481a33b2 authored by Ben Gamari

Merge branches 'wip/gc/segment-header-to-bdescr' and 'wip/gc/docs' into wip/gc/everything2

@@ -88,17 +88,23 @@ typedef struct bdescr_ {
     StgPtr start;              // [READ ONLY] start addr of memory
 
-    StgPtr free;               // First free byte of memory.
-                               // allocGroup() sets this to the value of start.
-                               // NB. during use this value should lie
-                               // between start and start + blocks *
-                               // BLOCK_SIZE. Values outside this
-                               // range are reserved for use by the
-                               // block allocator. In particular, the
-                               // value (StgPtr)(-1) is used to
-                               // indicate that a block is unallocated.
-                               //
-                               // Unused by the non-moving allocator.
+    union {
+        StgPtr free;           // First free byte of memory.
+                               // allocGroup() sets this to the value of start.
+                               // NB. during use this value should lie
+                               // between start and start + blocks *
+                               // BLOCK_SIZE. Values outside this
+                               // range are reserved for use by the
+                               // block allocator. In particular, the
+                               // value (StgPtr)(-1) is used to
+                               // indicate that a block is unallocated.
+                               //
+                               // Unused by the non-moving allocator.
+        struct NonmovingSegmentInfo {
+            StgWord8 log_block_size;
+            StgWord16 next_free_snap;
+        } nonmoving_segment;
+    };
 
     struct bdescr_ *link;      // used for chaining blocks together
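For orientation, a minimal sketch (not part of the patch) of how the overlaid metadata is reached. Since the non-moving allocator never consults bd->free, the union lets that storage double as per-segment metadata; everything below exists in the RTS except the hypothetical example_read_overlaid_info helper.

    #include "Rts.h"  // bdescr, Bdescr(), StgWord8/StgWord16, debugBelch

    // Hypothetical helper: read a nonmoving segment's metadata via its
    // block descriptor, without touching the segment itself.
    static void example_read_overlaid_info(StgPtr segment_start)
    {
        bdescr *bd = Bdescr(segment_start);
        StgWord8  log_sz = bd->nonmoving_segment.log_block_size;
        StgWord16 snap   = bd->nonmoving_segment.next_free_snap;
        debugBelch("block size: %d bytes, snapshot index: %d\n",
                   1 << log_sz, (int) snap);
    }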
@@ -231,6 +231,9 @@ typedef struct StgTSO_ {
  * setting the stack's mark bit (either the BF_MARKED bit for large objects
  * or otherwise its bit in its segment's mark bitmap).
  *
+ * To ensure that mutation does not proceed until the stack is fully marked,
+ * the mark phase must not set the mark bit until it has finished tracing.
+ *
  */
 #define STACK_DIRTY 1
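A sketch of the ordering discipline the new comment demands; the helper names here are hypothetical, not the RTS's:

    // Hypothetical sketch: the mark bit is what signals that the stack is
    // safe to mutate again, so it must be published only after tracing.
    static void markStackSketch(MarkQueue *queue, StgStack *stack)
    {
        traceStackFrames(queue, stack);  // hypothetical: trace every frame first
        setStackMarkBit(stack);          // then set BF_MARKED / the bitmap bit
    }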
This diff is collapsed.
@@ -38,13 +38,21 @@ struct NonmovingSegment {
     struct NonmovingSegment *link;      // for linking together segments into lists
     struct NonmovingSegment *todo_link; // NULL when not in todo list
     nonmoving_block_idx next_free;      // index of the next unallocated block
-    nonmoving_block_idx next_free_snap; // snapshot of next_free
-    uint8_t block_size;                 // log2 of block size in bytes
     uint8_t bitmap[];                   // liveness bitmap
     // After the liveness bitmap comes the data blocks. Note that we need to
     // ensure that the size of this struct (including the bitmap) is a multiple
     // of the word size since GHC assumes that all object pointers are
     // so-aligned.
+
+    // N.B. There are also bits of information which are stored in the
+    // NonmovingSegmentInfo in the segment's block descriptor. Namely:
+    //
+    //  * the block size can be found in nonmovingSegmentInfo(seg)->log_block_size.
+    //  * the next_free snapshot can be found in
+    //    nonmovingSegmentInfo(seg)->next_free_snap.
+    //
+    // This allows us to mark a nonmoving closure without bringing the
+    // NonmovingSegment header into cache.
 };
 
 // This is how we mark end of todo lists. Not NULL because todo_link == NULL
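The cache-locality claim above can be made concrete with a hedged sketch: locating a closure's mark byte needs the block size and the block index, and after this patch both derive from the block descriptor, so only the bdescr line and the bitmap byte itself are touched. nonmovingGetSegment and nonmovingGetBlockIdx appear later in this diff; the include path and the helper below are assumptions.

    #include "Rts.h"
    #include "sm/NonMoving.h"  // assumed location of the declarations in this diff

    // Hedged sketch: address of p's mark byte using only bdescr metadata.
    static uint8_t *exampleMarkByteAddr(StgPtr p)
    {
        struct NonmovingSegment *seg = nonmovingGetSegment(p);  // alignment mask
        nonmoving_block_idx i = nonmovingGetBlockIdx(p);        // uses bdescr info
        return &seg->bitmap[i];                                 // one byte per block
    }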
@@ -123,11 +131,20 @@ void *nonmovingAllocate(Capability *cap, StgWord sz);
 void nonmovingAddCapabilities(uint32_t new_n_caps);
 void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
 
+INLINE_HEADER struct NonmovingSegmentInfo *nonmovingSegmentInfo(struct NonmovingSegment *seg) {
+    return &Bdescr((StgPtr) seg)->nonmoving_segment;
+}
+
+INLINE_HEADER uint8_t nonmovingSegmentLogBlockSize(struct NonmovingSegment *seg) {
+    return nonmovingSegmentInfo(seg)->log_block_size;
+}
+
 // Add a segment to the appropriate active list.
 INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
 {
     struct NonmovingAllocator *alloc =
-        nonmovingHeap.allocators[seg->block_size - NONMOVING_ALLOCA0];
+        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
     while (true) {
         struct NonmovingSegment *current_active = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->active);
         seg->link = current_active;
@@ -141,7 +158,7 @@ INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
 INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
 {
     struct NonmovingAllocator *alloc =
-        nonmovingHeap.allocators[seg->block_size - NONMOVING_ALLOCA0];
+        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
     while (true) {
         struct NonmovingSegment *current_filled = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->filled);
         seg->link = current_filled;
@@ -162,7 +179,7 @@ void assert_in_nonmoving_heap(StgPtr p);
 // The block size of a given segment in bytes.
 INLINE_HEADER unsigned int nonmovingSegmentBlockSize(struct NonmovingSegment *seg)
 {
-    return 1 << seg->block_size;
+    return 1 << nonmovingSegmentLogBlockSize(seg);
 }
 
 // How many blocks does a segment with the given block size have?
@@ -180,7 +197,7 @@ unsigned int nonmovingBlockCountFromSize(uint8_t log_block_size);
 // How many blocks does the given segment contain? Also the size of the bitmap.
 INLINE_HEADER unsigned int nonmovingSegmentBlockCount(struct NonmovingSegment *seg)
 {
-    return nonmovingBlockCountFromSize(seg->block_size);
+    return nonmovingBlockCountFromSize(nonmovingSegmentLogBlockSize(seg));
 }
 
 // Get a pointer to the given block index assuming that the block size is as
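nonmovingBlockCountFromSize is only declared in this header; below is a hedged sketch of the constraint such a computation must satisfy. The rounding details are assumptions, not the RTS's actual definition.

    // A segment holds its header, one bitmap byte per block, and the blocks
    // themselves, so the count n must satisfy roughly:
    //   sizeof(struct NonmovingSegment) + n + (n << log_block_size)
    //       <= NONMOVING_SEGMENT_SIZE
    static unsigned int blockCountSketch(uint8_t log_block_size)
    {
        unsigned int avail = NONMOVING_SEGMENT_SIZE - sizeof(struct NonmovingSegment);
        return avail / ((1 << log_block_size) + 1);  // +1 byte of bitmap per block
    }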
@@ -188,7 +205,7 @@ INLINE_HEADER unsigned int nonmovingSegmentBlockCount(struct NonmovingSegment *s
 // available). The log_block_size argument must be equal to seg->block_size.
 INLINE_HEADER void *nonmovingSegmentGetBlock_(struct NonmovingSegment *seg, uint8_t log_block_size, nonmoving_block_idx i)
 {
-    ASSERT(log_block_size == seg->block_size);
+    ASSERT(log_block_size == nonmovingSegmentLogBlockSize(seg));
     // Block size in bytes
     unsigned int blk_size = 1 << log_block_size;
     // Bitmap size in bytes
@@ -204,7 +221,7 @@ INLINE_HEADER void *nonmovingSegmentGetBlock_(struct NonmovingSegment *seg, uint
 // Get a pointer to the given block index.
 INLINE_HEADER void *nonmovingSegmentGetBlock(struct NonmovingSegment *seg, nonmoving_block_idx i)
 {
-    return nonmovingSegmentGetBlock_(seg, seg->block_size, i);
+    return nonmovingSegmentGetBlock_(seg, nonmovingSegmentLogBlockSize(seg), i);
 }
 
 // Get the segment which a closure resides in. Assumes that pointer points into
@@ -227,7 +244,7 @@ INLINE_HEADER nonmoving_block_idx nonmovingGetBlockIdx(StgPtr p)
     struct NonmovingSegment *seg = nonmovingGetSegment(p);
     ptrdiff_t blk0 = (ptrdiff_t)nonmovingSegmentGetBlock(seg, 0);
     ptrdiff_t offset = (ptrdiff_t)p - blk0;
-    return (nonmoving_block_idx) (offset >> seg->block_size);
+    return (nonmoving_block_idx) (offset >> nonmovingSegmentLogBlockSize(seg));
 }
 
 // TODO: Eliminate this
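A worked instance of the shift above, with assumed numbers:

    // With log_block_size == 5 (32-byte blocks), a pointer 100 bytes past
    // block 0 yields index 100 >> 5 == 3: the fourth block of the segment.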
@@ -268,8 +285,9 @@ INLINE_HEADER bool nonmovingClosureMarked(StgPtr p)
 // segment is in the set of segments that will be swept this collection cycle.
 INLINE_HEADER bool nonmovingSegmentBeingSwept(struct NonmovingSegment *seg)
 {
-    unsigned int n = nonmovingSegmentBlockCount(seg);
-    return seg->next_free_snap >= n;
+    struct NonmovingSegmentInfo *seginfo = nonmovingSegmentInfo(seg);
+    unsigned int n = nonmovingBlockCountFromSize(seginfo->log_block_size);
+    return seginfo->next_free_snap >= n;
 }
 
 // Can be called during a major collection to determine whether a particular
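Why the comparison in nonmovingSegmentBeingSwept works, on a hedged reading of this patch's scheme: valid block indices run from 0 to n-1, and a segment that was already filled when the snapshot was taken has its next_free_snap at the block count itself, so only sweep-queued segments satisfy next_free_snap >= n. A hypothetical assertion capturing the invariant:

    ASSERT(nonmovingSegmentInfo(seg)->next_free_snap <= nonmovingSegmentBlockCount(seg));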
@@ -121,6 +121,9 @@ StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
  *
  *  - In the code generated by the STG code generator for pointer array writes
  *
+ *  - In thunk updates (e.g. updateWithIndirection) to ensure that the free
+ *    variables of the original thunk remain reachable.
+ *
  * There is also a read barrier to handle weak references, as described in
  * Note [Concurrent read barrier on deRefWeak#].
  *
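A hedged sketch of the thunk-update barrier described in the new bullet. updateWithIndirection is really a macro in the RTS's Updates.h; the simplified function below is an illustration, though the names it calls (nonmoving_write_barrier_enabled, updateRemembSetPushThunk, SET_INFO, stg_BLACKHOLE_info) are real.

    // Before overwriting a thunk with an indirection, push its free variables
    // to the update remembered set so the concurrent mark can still reach the
    // snapshot-time heap through them.
    static void updateWithIndirectionSketch(Capability *cap,
                                            StgClosure *p, StgClosure *ind)
    {
        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
            updateRemembSetPushThunk(cap, (StgThunk *) p);   // the new barrier
        }
        ((StgInd *) p)->indirectee = ind;
        write_barrier();                   // payload visible before info pointer
        SET_INFO(p, &stg_BLACKHOLE_info);  // p now behaves as an indirection
    }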
@@ -563,6 +566,13 @@ inline void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
     updateRemembSetPushThunkEager(cap, (StgThunkInfoTable *) info, thunk);
 }
 
+/* Push the free variables of a thunk to the update remembered set.
+ * This is called by the thunk update code (e.g. updateWithIndirection) before
+ * we update the indirectee to ensure that the thunk's free variables remain
+ * visible to the concurrent collector.
+ *
+ * See Note [Update remembered set].
+ */
 void updateRemembSetPushThunkEager(Capability *cap,
                                    const StgThunkInfoTable *info,
                                    StgThunk *thunk)
@@ -822,7 +832,7 @@ static MarkQueueEnt markQueuePop (MarkQueue *q)
         // MarkQueueEnt encoding always places the pointer to the object to be
         // marked first.
         prefetchForRead(&new.mark_closure.p->header.info);
-        prefetchForRead(&nonmovingGetSegment_unchecked((StgPtr) new.mark_closure.p)->block_size);
+        prefetchForRead(Bdescr((StgPtr) new.mark_closure.p));
         q->prefetch_queue[i] = new;
         i = (i + 1) % MARK_PREFETCH_QUEUE_DEPTH;
     }
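The prefetch change above follows directly from the layout change: the per-segment metadata the mark path reads (log_block_size, next_free_snap) now lives in the block descriptor, so prefetching Bdescr(p) warms exactly the cache line that will be needed, and the NonmovingSegment header stays out of cache.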
@@ -1266,7 +1276,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
             goto done;
 
         StgClosure *snapshot_loc =
-          (StgClosure *) nonmovingSegmentGetBlock(seg, seg->next_free_snap);
+          (StgClosure *) nonmovingSegmentGetBlock(seg, nonmovingSegmentInfo(seg)->next_free_snap);
         if (p >= snapshot_loc && mark == 0) {
             /*
              * In this case we are looking at a block that wasn't allocated
@@ -1703,7 +1713,7 @@ bool nonmovingIsAlive (StgClosure *p)
         struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
         nonmoving_block_idx i = nonmovingGetBlockIdx((StgPtr) p);
         uint8_t mark = nonmovingGetMark(seg, i);
-        if (i >= seg->next_free_snap) {
+        if (i >= nonmovingSegmentInfo(seg)->next_free_snap) {
             // If the object is allocated after next_free_snap then one of the
             // following must be true:
             //
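The rule applied above, restated as a hedged sketch (the enumerated cases are cut off in this view; the helper is hypothetical):

    // An object at or beyond the segment's snapshot point was allocated after
    // the mark snapshot was taken, so the collector conservatively treats it
    // as live even though it carries no mark.
    static bool allocatedSinceSnapshot(struct NonmovingSegment *seg,
                                       nonmoving_block_idx i)
    {
        return i >= nonmovingSegmentInfo(seg)->next_free_snap;
    }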
@@ -41,7 +41,7 @@ nonmovingSweepSegment(struct NonmovingSegment *seg)
         } else if (!found_free) {
             found_free = true;
             seg->next_free = i;
-            seg->next_free_snap = i;
+            nonmovingSegmentInfo(seg)->next_free_snap = i;
             Bdescr((P_)seg)->u.scan = (P_)nonmovingSegmentGetBlock(seg, i);
             seg->bitmap[i] = 0;
         } else {
@@ -63,7 +63,7 @@ nonmovingSweepSegment(struct NonmovingSegment *seg)
         return SEGMENT_FILLED;
     } else {
         ASSERT(seg->next_free == 0);
-        ASSERT(seg->next_free_snap == 0);
+        ASSERT(nonmovingSegmentInfo(seg)->next_free_snap == 0);
         return SEGMENT_FREE;
     }
 }
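For context, a hedged sketch of how a caller might dispatch on the sweep result. SEGMENT_FILLED and SEGMENT_FREE appear in the hunks above; SEGMENT_PARTIAL and the exact routing are assumptions about the surrounding code.

    switch (nonmovingSweepSegment(seg)) {
        case SEGMENT_FILLED:  nonmovingPushFilledSegment(seg); break; // all blocks live
        case SEGMENT_FREE:    nonmovingPushFreeSegment(seg);   break; // no blocks live
        case SEGMENT_PARTIAL: nonmovingPushActiveSegment(seg); break; // mixed
    }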
@@ -497,7 +497,7 @@ static void checkNonmovingSegments (struct NonmovingSegment *seg)
         if (seg->bitmap[i] == nonmovingMarkEpoch) {
             StgPtr p = nonmovingSegmentGetBlock(seg, i);
             checkClosure((StgClosure *) p);
-        } else if (i < seg->next_free_snap){
+        } else if (i < nonmovingSegmentInfo(seg)->next_free_snap){
             seg->bitmap[i] = 0;
         }
     }