Commit 17a59602 authored by Simon Marlow

traverse the spark pools only once during GC rather than twice

parent 99df892c
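Before this change each Capability's spark pool was walked twice per GC: once during root marking, where traverseSparkQueue evacuated every spark, and once after collection, where pruneSparkQueues discarded the sparks no longer worth running. The patch folds the pruning into the marking pass so each pool is walked only once. The sketch below is a simplified model of that single pass, not RTS code: SparkPool, should_spark and evacuate_root here are stand-ins for the real Capability, closure_SHOULD_SPARK and evac machinery.

/* Simplified model: one pass over a spark pool that both evacuates the
 * sparks worth keeping and drops the useless ones, compacting in place.
 * All names are invented stand-ins, not the RTS API. */
#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 8

typedef struct { int value; bool evaluated; } Closure;

typedef struct {
    Closure *elems[POOL_SIZE];
    int      n;
} SparkPool;

/* A spark is worth keeping only if its closure still needs evaluating. */
static bool should_spark(const Closure *c) { return !c->evaluated; }

/* Stand-in for the GC's evac function: here it just counts roots. */
static void evacuate_root(Closure **root, int *evacuated)
{
    (void)root;
    (*evacuated)++;
}

/* New scheme: keep, evacuate and compact in a single pass. */
static void prune_and_evacuate(SparkPool *pool, int *evacuated, int *pruned)
{
    int keep = 0;
    for (int i = 0; i < pool->n; i++) {
        Closure *spark = pool->elems[i];
        if (should_spark(spark)) {
            pool->elems[keep] = spark;          /* keep entry in new slot */
            evacuate_root(&pool->elems[keep], evacuated);
            keep++;
        } else {
            (*pruned)++;                        /* discard useless spark  */
        }
    }
    pool->n = keep;
}

int main(void)
{
    Closure cs[4] = { {1, false}, {2, true}, {3, false}, {4, true} };
    SparkPool pool = { { &cs[0], &cs[1], &cs[2], &cs[3] }, 4 };

    int evacuated = 0, pruned = 0;
    prune_and_evacuate(&pool, &evacuated, &pruned);

    printf("kept %d sparks, evacuated %d roots, pruned %d\n",
           pool.n, evacuated, pruned);   /* kept 2, evacuated 2, pruned 2 */
    return 0;
}

In the patch itself the single pass is selected by the new prune_sparks argument to markSomeCapabilities: the GC root-marking call sites pass rtsTrue, while markCapabilities keeps the traverse-only behaviour by passing rtsFalse.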
@@ -821,7 +821,8 @@ freeCapability (Capability *cap) {
    ------------------------------------------------------------------------ */

 void
-markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
+markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
+                      rtsBool prune_sparks USED_IF_THREADS)
 {
     nat i;
     Capability *cap;
@@ -848,7 +849,11 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
        }

 #if defined(THREADED_RTS)
-       traverseSparkQueue (evac, user, cap);
+       if (prune_sparks) {
+           pruneSparkQueue (evac, user, cap);
+       } else {
+           traverseSparkQueue (evac, user, cap);
+       }
 #endif
     }
@@ -859,22 +864,8 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
 #endif
 }

-// This function is used by the compacting GC to thread all the
-// pointers from spark queues.
-void
-traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
-{
-#if defined(THREADED_RTS)
-    nat i;
-    for (i = 0; i < n_capabilities; i++) {
-        traverseSparkQueue (evac, user, &capabilities[i]);
-    }
-#endif // THREADED_RTS
-}
-
 void
 markCapabilities (evac_fn evac, void *user)
 {
-    markSomeCapabilities(evac, user, 0, 1);
+    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
 }
@@ -264,7 +264,8 @@ void setContextSwitches(void);
 void freeCapability (Capability *cap);

 // FOr the GC:
-void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta);
+void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
+                           rtsBool prune_sparks);
 void markCapabilities (evac_fn evac, void *user);
 void traverseSparkQueues (evac_fn evac, void *user);
@@ -371,13 +371,14 @@ newSpark (StgRegTable *reg, StgClosure *p)
  * the spark pool only contains sparkable closures.
  * -------------------------------------------------------------------------- */

-static void
-pruneSparkQueue (Capability *cap)
+void
+pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
 {
     SparkPool *pool;
-    StgClosurePtr spark, *elements;
+    StgClosurePtr spark, tmp, *elements;
     nat n, pruned_sparks; // stats only
     StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
+    const StgInfoTable *info;

     PAR_TICKY_MARK_SPARK_QUEUE_START();
@@ -440,14 +441,31 @@ pruneSparkQueue (Capability *cap)
          botInd, otherwise move on */
       spark = elements[currInd];

-      /* if valuable work: shift inside the pool */
-      if ( closure_SHOULD_SPARK(spark) ) {
-          elements[botInd] = spark; // keep entry (new address)
-          botInd++;
-          n++;
-      } else {
-          pruned_sparks++; // discard spark
-          cap->sparks_pruned++;
+      // We have to be careful here: in the parallel GC, another
+      // thread might evacuate this closure while we're looking at it,
+      // so grab the info pointer just once.
+      info = spark->header.info;
+      if (IS_FORWARDING_PTR(info)) {
+          tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+          /* if valuable work: shift inside the pool */
+          if (closure_SHOULD_SPARK(tmp)) {
+              elements[botInd] = tmp; // keep entry (new address)
+              botInd++;
+              n++;
+          } else {
+              pruned_sparks++; // discard spark
+              cap->sparks_pruned++;
+          }
+      } else {
+          if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
+              elements[botInd] = spark; // keep entry (new address)
+              evac (user, &elements[botInd]);
+              botInd++;
+              n++;
+          } else {
+              pruned_sparks++; // discard spark
+              cap->sparks_pruned++;
+          }
       }
       currInd++;
@@ -477,15 +495,6 @@ pruneSparkQueue (Capability *cap)
     ASSERT_SPARK_POOL_INVARIANTS(pool);
 }

-void
-pruneSparkQueues (void)
-{
-    nat i;
-    for (i = 0; i < n_capabilities; i++) {
-        pruneSparkQueue(&capabilities[i]);
-    }
-}
-
 /* GC for the spark pool, called inside Capability.c for all
    capabilities in turn. Blindly "evac"s complete spark pool. */
 void
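The core of the new pruneSparkQueue is the forwarding-pointer branch added in the hunk above: the spark's info pointer is read exactly once, because in the parallel GC another thread may evacuate the closure (overwriting its info word with a forwarding pointer) while this pass is examining it. If the closure has already been forwarded, the new copy is inspected and kept or dropped; otherwise the closure type decides, and a kept spark is evacuated on the spot. The sketch below models only that read-once pattern; the low-bit forwarding tag and every name in it (examine_spark, still_a_thunk) are simplified stand-ins, not the RTS's actual representation.

/* Minimal model of "grab the info pointer just once".  A forwarded closure
 * is faked here as an info word with the low bit set that points at the new
 * copy; is_forwarding_ptr/un_forwarding_ptr mimic the RTS macros of similar
 * names, and the closure layout is invented for the example. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef struct Closure {
    uintptr_t info;          /* either an info "pointer" or a tagged forward */
    bool      still_a_thunk; /* stand-in for the closure_SHOULD_SPARK test   */
} Closure;

static bool is_forwarding_ptr(uintptr_t info) { return (info & 1) != 0; }

static Closure *un_forwarding_ptr(uintptr_t info)
{
    return (Closure *)(info & ~(uintptr_t)1);
}

/* Decide what to do with one spark slot: return the closure to keep,
 * or NULL if the spark should be pruned. */
static Closure *examine_spark(Closure *spark)
{
    /* Read the info word once: another GC thread may overwrite it with a
     * forwarding pointer at any moment, so reading it twice could observe
     * two different values. */
    uintptr_t info = spark->info;

    if (is_forwarding_ptr(info)) {
        /* Already evacuated by someone else: judge the new copy. */
        Closure *copy = un_forwarding_ptr(info);
        return copy->still_a_thunk ? copy : NULL;
    } else {
        /* Not yet evacuated: keep it only if it still represents work;
         * the caller would then evacuate the retained slot itself. */
        return spark->still_a_thunk ? spark : NULL;
    }
}

int main(void)
{
    Closure evaluated = { 0, false };
    Closure live      = { 0, true  };
    Closure forwarded = { (uintptr_t)&live | 1, false };

    printf("%p %p %p\n",
           (void *)examine_spark(&live),       /* kept as-is        */
           (void *)examine_spark(&forwarded),  /* kept via new copy */
           (void *)examine_spark(&evaluated)); /* NULL: pruned      */
    return 0;
}

In the real code the retained, not-yet-forwarded slot is evacuated in place (evac (user, &elements[botInd])), which is what makes the separate post-GC pruneSparkQueues pass unnecessary.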
@@ -76,8 +76,8 @@ rtsBool looksEmpty(SparkPool* deque);
 StgClosure * tryStealSpark (SparkPool *pool);
 void freeSparkPool (SparkPool *pool);
 void createSparkThread (Capability *cap, StgClosure *p);
-void pruneSparkQueues (void);
 void traverseSparkQueue(evac_fn evac, void *user, Capability *cap);
+void pruneSparkQueue (evac_fn evac, void *user, Capability *cap);

 INLINE_HEADER void discardSparks (SparkPool *pool);
 INLINE_HEADER nat sparkPoolSize (SparkPool *pool);
@@ -335,7 +335,8 @@ GarbageCollect ( rtsBool force_major_gc )
   // follow all the roots that the application knows about.
   gct->evac_step = 0;
-  markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
+  markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
+                       rtsTrue/*prune sparks*/);

 #if defined(RTS_USER_SIGNALS)
   // mark the signal handlers (signals should be already blocked)
@@ -724,11 +725,6 @@ GarbageCollect ( rtsBool force_major_gc )
   // Update the stable pointer hash table.
   updateStablePtrTable(major_gc);

-  // Remove useless sparks from the spark pools
-#ifdef THREADED_RTS
-  pruneSparkQueues();
-#endif
-
   // check sanity after GC
   IF_DEBUG(sanity, checkSanity());
@@ -1009,7 +1005,8 @@ gc_thread_work (void)
     // Every thread evacuates some roots.
     gct->evac_step = 0;
-    markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
+    markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
+                         rtsTrue/*prune sparks*/);

     scavenge_until_all_done();
 }
@@ -32,8 +32,6 @@ extern long copied;
 extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
 #endif

-extern void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta);
-
 #ifdef THREADED_RTS
 extern SpinLock gc_alloc_block_sync;
 #endif