Commit f9ce05ef authored by Simon Marlow's avatar Simon Marlow
Browse files

Make sparks into weak pointers (#2185)

The new strategies library (parallel-2.0+, preferably 2.2+) is now
required for parallel programming, otherwise parallelism will be lost.
parent 6c016f38
...@@ -819,7 +819,7 @@ freeCapabilities (void) ...@@ -819,7 +819,7 @@ freeCapabilities (void)
void void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta, markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
rtsBool prune_sparks USED_IF_THREADS) rtsBool no_mark_sparks USED_IF_THREADS)
{ {
nat i; nat i;
Capability *cap; Capability *cap;
...@@ -843,9 +843,7 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta, ...@@ -843,9 +843,7 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
} }
#if defined(THREADED_RTS) #if defined(THREADED_RTS)
if (prune_sparks) { if (!no_mark_sparks) {
pruneSparkQueue (evac, user, cap);
} else {
traverseSparkQueue (evac, user, cap); traverseSparkQueue (evac, user, cap);
} }
#endif #endif
......
...@@ -276,7 +276,7 @@ void freeCapabilities (void); ...@@ -276,7 +276,7 @@ void freeCapabilities (void);
// For the GC: // For the GC:
void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta, void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
rtsBool prune_sparks); rtsBool no_mark_sparks);
void markCapabilities (evac_fn evac, void *user); void markCapabilities (evac_fn evac, void *user);
void traverseSparkQueues (evac_fn evac, void *user); void traverseSparkQueues (evac_fn evac, void *user);
......
...@@ -112,7 +112,7 @@ tryStealSpark (Capability *cap) ...@@ -112,7 +112,7 @@ tryStealSpark (Capability *cap)
* -------------------------------------------------------------------------- */ * -------------------------------------------------------------------------- */
void void
pruneSparkQueue (evac_fn evac, void *user, Capability *cap) pruneSparkQueue (Capability *cap)
{ {
SparkPool *pool; SparkPool *pool;
StgClosurePtr spark, tmp, *elements; StgClosurePtr spark, tmp, *elements;
...@@ -208,17 +208,21 @@ pruneSparkQueue (evac_fn evac, void *user, Capability *cap) ...@@ -208,17 +208,21 @@ pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
pruned_sparks++; // discard spark pruned_sparks++; // discard spark
cap->sparks_pruned++; cap->sparks_pruned++;
} }
} else { } else if (HEAP_ALLOCED(spark) &&
if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) { (Bdescr(spark)->flags & BF_EVACUATED)) {
if (closure_SHOULD_SPARK(spark)) {
elements[botInd] = spark; // keep entry (new address) elements[botInd] = spark; // keep entry (new address)
evac (user, &elements[botInd]);
botInd++; botInd++;
n++; n++;
} else { } else {
pruned_sparks++; // discard spark pruned_sparks++; // discard spark
cap->sparks_pruned++; cap->sparks_pruned++;
} }
} else {
pruned_sparks++; // discard spark
cap->sparks_pruned++;
} }
currInd++; currInd++;
// in the loop, we may reach the bounds, and instantly wrap around // in the loop, we may reach the bounds, and instantly wrap around
......
...@@ -34,7 +34,7 @@ StgClosure * tryStealSpark (Capability *cap); ...@@ -34,7 +34,7 @@ StgClosure * tryStealSpark (Capability *cap);
void freeSparkPool (SparkPool *pool); void freeSparkPool (SparkPool *pool);
void createSparkThread (Capability *cap); void createSparkThread (Capability *cap);
void traverseSparkQueue(evac_fn evac, void *user, Capability *cap); void traverseSparkQueue(evac_fn evac, void *user, Capability *cap);
void pruneSparkQueue (evac_fn evac, void *user, Capability *cap); void pruneSparkQueue (Capability *cap);
INLINE_HEADER void discardSparks (SparkPool *pool); INLINE_HEADER void discardSparks (SparkPool *pool);
INLINE_HEADER long sparkPoolSize (SparkPool *pool); INLINE_HEADER long sparkPoolSize (SparkPool *pool);
......
...@@ -411,6 +411,16 @@ SET_GCT(gc_threads[0]); ...@@ -411,6 +411,16 @@ SET_GCT(gc_threads[0]);
// Now see which stable names are still alive. // Now see which stable names are still alive.
gcStablePtrTable(); gcStablePtrTable();
#ifdef THREADED_RTS
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
pruneSparkQueue(&capabilities[n]);
}
} else {
pruneSparkQueue(&capabilities[gct->thread_index]);
}
#endif
#ifdef PROFILING #ifdef PROFILING
// We call processHeapClosureForDead() on every closure destroyed during // We call processHeapClosureForDead() on every closure destroyed during
// the current garbage collection, so we invoke LdvCensusForDead(). // the current garbage collection, so we invoke LdvCensusForDead().
...@@ -1072,6 +1082,16 @@ gcWorkerThread (Capability *cap) ...@@ -1072,6 +1082,16 @@ gcWorkerThread (Capability *cap)
scavenge_until_all_done(); scavenge_until_all_done();
#ifdef THREADED_RTS
// Now that the whole heap is marked, we discard any sparks that
// were found to be unreachable. The main GC thread is currently
// marking heap reachable via weak pointers, so it is
// non-deterministic whether a spark will be retained if it is
// only reachable via weak pointers. To fix this problem would
// require another GC barrier, which is too high a price.
pruneSparkQueue(cap);
#endif
#ifdef USE_PAPI #ifdef USE_PAPI
// count events in this thread towards the GC totals // count events in this thread towards the GC totals
papi_thread_stop_gc1_count(gct->papi_events); papi_thread_stop_gc1_count(gct->papi_events);
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment