Commit 3b961883 authored by Ben Gamari's avatar Ben Gamari 🐢

Disable aging when doing deadlock detection GC

parent 7ff503fd
......@@ -164,7 +164,8 @@ static void scheduleHandleThreadBlocked( StgTSO *t );
static bool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static bool scheduleNeedHeapProfile(bool ready_to_gc);
static void scheduleDoGC(Capability **pcap, Task *task, bool force_major);
static void scheduleDoGC( Capability **pcap, Task *task,
bool force_major, bool deadlock_detect );
static void deleteThread (StgTSO *tso);
static void deleteAllThreads (void);
......@@ -264,7 +265,7 @@ schedule (Capability *initialCapability, Task *task)
case SCHED_INTERRUPTING:
debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
/* scheduleDoGC() deletes all the threads */
scheduleDoGC(&cap,task,true);
scheduleDoGC(&cap,task,true,false);
// after scheduleDoGC(), we must be shutting down. Either some
// other Capability did the final GC, or we did it above,
......@@ -561,7 +562,7 @@ run_thread:
}
if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
scheduleDoGC(&cap,task,false);
scheduleDoGC(&cap,task,false,false);
}
} /* end of while() */
}
......@@ -935,7 +936,7 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
scheduleDoGC (pcap, task, true/*force major GC*/);
scheduleDoGC (pcap, task, true/*force major GC*/, true/*deadlock detection*/);
cap = *pcap;
// when force_major == true. scheduleDoGC sets
// recent_activity to ACTIVITY_DONE_GC and turns off the timer
......@@ -1005,7 +1006,7 @@ scheduleProcessInbox (Capability **pcap USED_IF_THREADS)
while (!emptyInbox(cap)) {
// Executing messages might use heap, so we should check for GC.
if (doYouWantToGC(cap)) {
scheduleDoGC(pcap, cap->running_task, false);
scheduleDoGC(pcap, cap->running_task, false, false);
cap = *pcap;
}
......@@ -1552,9 +1553,11 @@ void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task)
* Perform a garbage collection if necessary
* -------------------------------------------------------------------------- */
// N.B. See Note [Deadlock detection under nonmoving collector] for rationale
// behind deadlock_detect argument.
static void
scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
bool force_major)
bool force_major, bool deadlock_detect)
{
Capability *cap = *pcap;
bool heap_census;
......@@ -1847,9 +1850,9 @@ delete_threads_and_gc:
// emerge they don't immediately re-enter the GC.
pending_sync = 0;
signalCondition(&sync_finished_cond);
GarbageCollect(collect_gen, heap_census, gc_type, cap, idle_cap);
GarbageCollect(collect_gen, heap_census, deadlock_detect, gc_type, cap, idle_cap);
#else
GarbageCollect(collect_gen, heap_census, 0, cap, NULL);
GarbageCollect(collect_gen, heap_census, deadlock_detect, 0, cap, NULL);
#endif
// If we're shutting down, don't leave any idle GC work to do.
......@@ -2723,7 +2726,7 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
nonmovingStop();
Capability *cap = task->cap;
waitForCapability(&cap,task);
scheduleDoGC(&cap,task,true);
scheduleDoGC(&cap,task,true,false);
ASSERT(task->incall->tso == NULL);
releaseCapability(cap);
}
......@@ -2791,7 +2794,7 @@ performGC_(bool force_major)
// TODO: do we need to traceTask*() here?
waitForCapability(&cap,task);
scheduleDoGC(&cap,task,force_major);
scheduleDoGC(&cap,task,force_major,false);
releaseCapability(cap);
boundTaskExiting(task);
}
......
......@@ -85,22 +85,34 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
}
}
if (RtsFlags.GcFlags.useNonmoving && gen_no == oldest_gen->no) {
gct->copied += size;
to = nonmovingAllocate(gct->cap, size);
// Add segment to the todo list unless it's already there
// current->todo_link == NULL means not in todo list
struct NonmovingSegment *seg = nonmovingGetSegment(to);
if (!seg->todo_link) {
gen_workspace *ws = &gct->gens[oldest_gen->no];
seg->todo_link = ws->todo_seg;
ws->todo_seg = seg;
}
if (RtsFlags.GcFlags.useNonmoving) {
/* See Note [Deadlock detection under nonmoving collector]. */
if (deadlock_detect_gc)
gen_no = oldest_gen->no;
if (gen_no == oldest_gen->no) {
gct->copied += size;
to = nonmovingAllocate(gct->cap, size);
// Add segment to the todo list unless it's already there
// current->todo_link == NULL means not in todo list
struct NonmovingSegment *seg = nonmovingGetSegment(to);
if (!seg->todo_link) {
gen_workspace *ws = &gct->gens[oldest_gen->no];
seg->todo_link = ws->todo_seg;
ws->todo_seg = seg;
}
if (major_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
return to;
// The object which refers to this closure may have been aged (i.e.
// retained in a younger generation). Consequently, we must add the
// closure to the mark queue to ensure that it will be marked.
//
// However, if we are in a deadlock detection GC then we disable aging
// so there is no need.
if (major_gc && !deadlock_detect_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
return to;
}
}
ws = &gct->gens[gen_no]; // zero memory references here
......@@ -319,7 +331,10 @@ evacuate_large(StgPtr p)
*/
new_gen_no = bd->dest_no;
if (new_gen_no < gct->evac_gen_no) {
if (deadlock_detect_gc) {
/* See Note [Deadlock detection under nonmoving collector]. */
new_gen_no = oldest_gen->no;
} else if (new_gen_no < gct->evac_gen_no) {
if (gct->eager_promotion) {
new_gen_no = gct->evac_gen_no;
} else {
......@@ -370,7 +385,7 @@ evacuate_static_object (StgClosure **link_field, StgClosure *q)
{
if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
// See Note [Static objects under the nonmoving collector] in Storage.c.
if (major_gc)
if (major_gc && !deadlock_detect_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
return;
}
......@@ -615,7 +630,7 @@ loop:
// NOTE: large objects in nonmoving heap are also marked with
// BF_NONMOVING. Those are moved to scavenged_large_objects list in
// mark phase.
if (major_gc)
if (major_gc && !deadlock_detect_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
return;
}
......@@ -647,7 +662,7 @@ loop:
// We may have evacuated the block to the nonmoving generation. If so
// we need to make sure it is added to the mark queue since the only
// reference to it may be from the moving heap.
if (major_gc && bd->flags & BF_NONMOVING) {
if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
}
return;
......@@ -661,7 +676,7 @@ loop:
// We may have evacuated the block to the nonmoving generation. If so
// we need to make sure it is added to the mark queue since the only
// reference to it may be from the moving heap.
if (major_gc && bd->flags & BF_NONMOVING) {
if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
}
return;
......@@ -965,7 +980,7 @@ evacuate_BLACKHOLE(StgClosure **p)
ASSERT((bd->flags & BF_COMPACT) == 0);
if (bd->flags & BF_NONMOVING) {
if (major_gc)
if (major_gc && !deadlock_detect_gc)
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
return;
}
......
......@@ -104,6 +104,7 @@
*/
uint32_t N;
bool major_gc;
bool deadlock_detect_gc;
/* Data used for allocation area sizing.
*/
......@@ -194,6 +195,7 @@ StgPtr mark_sp; // pointer to the next unallocated mark stack entry
void
GarbageCollect (uint32_t collect_gen,
const bool do_heap_census,
const bool deadlock_detect,
uint32_t gc_type USED_IF_THREADS,
Capability *cap,
bool idle_cap[])
......@@ -263,6 +265,9 @@ GarbageCollect (uint32_t collect_gen,
N = collect_gen;
major_gc = (N == RtsFlags.GcFlags.generations-1);
/* See Note [Deadlock detection under nonmoving collector]. */
deadlock_detect_gc = deadlock_detect;
/* N.B. The nonmoving collector works a bit differently. See
* Note [Static objects under the nonmoving collector].
*/
......
......@@ -17,9 +17,12 @@
#include "HeapAlloc.h"
void GarbageCollect (uint32_t force_major_gc,
void GarbageCollect (uint32_t collect_gen,
bool do_heap_census,
uint32_t gc_type, Capability *cap, bool idle_cap[]);
bool deadlock_detect,
uint32_t gc_type,
Capability *cap,
bool idle_cap[]);
typedef void (*evac_fn)(void *user, StgClosure **root);
......@@ -30,6 +33,8 @@ bool doIdleGCWork(Capability *cap, bool all);
extern uint32_t N;
extern bool major_gc;
/* See Note [Deadlock detection under nonmoving collector]. */
extern bool deadlock_detect_gc;
extern bdescr *mark_stack_bd;
extern bdescr *mark_stack_top_bd;
......
......@@ -382,6 +382,11 @@ push (MarkQueue *q, const MarkQueueEnt *ent)
void
markQueuePushClosureGC (MarkQueue *q, StgClosure *p)
{
/* We should not make it here if we are doing a deadlock detect GC.
* See Note [Deadlock detection under nonmoving collector].
*/
ASSERT(!deadlock_detect_gc);
// Are we at the end of the block?
if (q->top->head == MARK_QUEUE_BLOCK_ENTRIES) {
// Yes, this block is full.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment