Commit 01ccdeea authored by simonmarhaskell@gmail.com

FIX #2185: sparks should not be treated as roots by the GC

parent 1d0e86e6
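In outline: a spark is a pointer to a closure that an idle worker may evaluate speculatively. While the spark pools were treated as GC roots, every sparked closure (and everything reachable from it) was retained even when nothing else referenced it. The self-contained sketch below is illustrative only, not RTS code: ToyObj, mark_from, roots and spark_queue are invented names. It shows the shape of the fix in miniature: only the real roots are traced, and the spark queue is pruned afterwards, as updateCapabilitiesPostGC()/updateSparkQueue() do in this patch.

/*
 * Illustrative sketch only, not RTS code.  Old behaviour: the spark
 * queue was traced as a root set, so an object referenced only by a
 * spark survived every GC.  New behaviour: the queue is merely
 * consulted after marking (isAlive() in the real code) and dead
 * sparks are dropped.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct ToyObj {
    const char    *name;
    int            marked;
    struct ToyObj *child;            /* one outgoing reference, or NULL */
} ToyObj;

/* A minimal mark phase: follow child pointers from a starting object. */
static void mark_from(ToyObj *o)
{
    while (o != NULL && !o->marked) {
        o->marked = 1;
        o = o->child;
    }
}

int main(void)
{
    ToyObj a = { "A", 0, NULL };     /* referenced by a real root       */
    ToyObj b = { "B", 0, NULL };     /* referenced only by a spark      */
    ToyObj *roots[]       = { &a };
    ToyObj *spark_queue[] = { &b };
    size_t i, kept = 0;

    /* Mark phase: only the real roots are traced.  Before this patch,
     * spark_queue would have been traced here too, keeping B alive.    */
    for (i = 0; i < sizeof(roots)/sizeof(roots[0]); i++)
        mark_from(roots[i]);

    /* Post-GC pruning: keep a spark only if its object survived by
     * other means, otherwise discard it.                               */
    for (i = 0; i < sizeof(spark_queue)/sizeof(spark_queue[0]); i++) {
        if (spark_queue[i]->marked) {
            kept++;                  /* spark is still worth running    */
        } else {
            spark_queue[i] = NULL;   /* prune the dead spark            */
        }
    }

    printf("A is %s, B is %s, %zu spark(s) kept\n",
           a.marked ? "live" : "dead",
           b.marked ? "live" : "dead",
           kept);                    /* prints: A is live, B is dead, 0 */
    return 0;
}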
......@@ -791,10 +791,6 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
"evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
evac(user, (StgClosure **)(void *)&task->suspended_tso);
}
#if defined(THREADED_RTS)
markSparkQueue (evac, user, cap);
#endif
}
#if !defined(THREADED_RTS)
......@@ -804,6 +800,34 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
#endif
}
// Sparks are not roots for GC, so we don't mark them in
// markSomeCapabilities(). Instead, we traverse the spark queues
// after GC and throw away any that are unreachable.
void
updateCapabilitiesPostGC (void)
{
#if defined(THREADED_RTS)
nat i;
for (i = 0; i < n_capabilities; i++) {
updateSparkQueue (&capabilities[i]);
}
#endif // THREADED_RTS
}
// This function is used by the compacting GC to thread all the
// pointers from spark queues.
void
traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
{
#if defined(THREADED_RTS)
nat i;
for (i = 0; i < n_capabilities; i++) {
traverseSparkQueue (evac, user, &capabilities[i]);
}
#endif // THREADED_RTS
}
void
markCapabilities (evac_fn evac, void *user)
{
......
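The hunk above also adds traverseSparkQueues(), which hands an evac_fn callback plus an opaque user pointer the address of every queued spark; the compacting collector uses it with thread_root (see the Compact.c hunk further down) so it can update the slots in place. The sketch below is a minimal, self-contained illustration of that callback-with-user-data pattern under assumed names (ToyEntry, visit_fn, traverse_slots, count_nonnull are invented); it is not the RTS API.

/* Illustrative sketch only -- mirrors the (user, slot address) calling
 * convention of evac_fn, nothing more. */
#include <stdio.h>
#include <stddef.h>

typedef struct ToyEntry { int value; } ToyEntry;

/* Same argument order as evac_fn: (user data, address of a slot). */
typedef void (*visit_fn)(void *user, ToyEntry **slot);

/* Apply the callback to every slot, as traverseSparkQueue does for the
 * slots between pool->hd and pool->tl.                                 */
static void traverse_slots(visit_fn visit, void *user,
                           ToyEntry **slots, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        visit(user, &slots[i]);      /* callback may read or update it  */
    }
}

/* A trivial callback: count the slots that are still occupied. */
static void count_nonnull(void *user, ToyEntry **slot)
{
    if (*slot != NULL) {
        (*(size_t *)user)++;
    }
}

int main(void)
{
    ToyEntry a = {1}, b = {2};
    ToyEntry *slots[4] = { &a, NULL, &b, NULL };
    size_t live = 0;

    traverse_slots(count_nonnull, &live, slots, 4);
    printf("occupied slots: %zu\n", live);      /* prints 2 */
    return 0;
}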
......@@ -238,6 +238,8 @@ void freeCapability (Capability *cap);
// For the GC:
void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta);
void markCapabilities (evac_fn evac, void *user);
void updateCapabilitiesPostGC (void);
void traverseSparkQueues (evac_fn evac, void *user);
/* -----------------------------------------------------------------------------
* INLINE functions... private below here
......
......@@ -169,9 +169,9 @@ newSpark (StgRegTable *reg, StgClosure *p)
* -------------------------------------------------------------------------- */
void
markSparkQueue (evac_fn evac, void *user, Capability *cap)
updateSparkQueue (Capability *cap)
{
StgClosure **sparkp, **to_sparkp;
StgClosure *spark, **sparkp, **to_sparkp;
nat n, pruned_sparks; // stats only
StgSparkPool *pool;
......@@ -184,21 +184,15 @@ markSparkQueue (evac_fn evac, void *user, Capability *cap)
ASSERT_SPARK_POOL_INVARIANTS(pool);
#if defined(PARALLEL_HASKELL)
// stats only
n = 0;
pruned_sparks = 0;
#endif
sparkp = pool->hd;
to_sparkp = pool->hd;
while (sparkp != pool->tl) {
ASSERT(*sparkp!=NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgClosure *)*sparkp)));
// ToDo?: statistics gathering here (also for GUM!)
if (closure_SHOULD_SPARK(*sparkp)) {
evac(user, sparkp);
*to_sparkp++ = *sparkp;
spark = isAlive(*sparkp);
if (spark != NULL && closure_SHOULD_SPARK(spark)) {
*to_sparkp++ = spark;
if (to_sparkp == pool->lim) {
to_sparkp = pool->base;
}
......@@ -215,21 +209,32 @@ markSparkQueue (evac_fn evac, void *user, Capability *cap)
PAR_TICKY_MARK_SPARK_QUEUE_END(n);
#if defined(PARALLEL_HASKELL)
debugTrace(DEBUG_sched,
"marked %d sparks and pruned %d sparks on [%x]",
n, pruned_sparks, mytid);
#else
debugTrace(DEBUG_sched,
"marked %d sparks and pruned %d sparks",
"updated %d sparks and pruned %d sparks",
n, pruned_sparks);
#endif
debugTrace(DEBUG_sched,
"new spark queue len=%d; (hd=%p; tl=%p)\n",
sparkPoolSize(pool), pool->hd, pool->tl);
}
void
traverseSparkQueue (evac_fn evac, void *user, Capability *cap)
{
StgClosure **sparkp;
StgSparkPool *pool;
pool = &(cap->r.rSparks);
sparkp = pool->hd;
while (sparkp != pool->tl) {
evac(user, sparkp);
sparkp++;
if (sparkp == pool->lim) {
sparkp = pool->base;
}
}
}
#else
StgInt
......
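The updateSparkQueue() body above sweeps the circular pool with two pointers, copying surviving sparks down over the region between pool->hd and pool->tl and resetting tl, with the liveness decision made by isAlive() and closure_SHOULD_SPARK(). Below is a small, self-contained sketch of that two-pointer, wrap-around sweep under invented names (ToySpark, ToyPool, toy_is_alive, prune_pool); it is an illustration of the technique, not the RTS code.

/* Illustrative sketch only -- not RTS code. */
#include <stdio.h>
#include <stddef.h>

#define QUEUE_CAP 8

typedef struct { int payload; int alive; } ToySpark;

typedef struct {
    ToySpark *base, *lim;            /* bounds of the circular buffer   */
    ToySpark *hd, *tl;               /* oldest entry / next free slot   */
    ToySpark  slots[QUEUE_CAP];
} ToyPool;

/* Stand-in for isAlive(): the real GC asks whether the spark's closure
 * was reached from the genuine roots in the collection just finished.  */
static int toy_is_alive(const ToySpark *s) { return s->alive; }

static size_t pool_size(const ToyPool *p)
{
    return (p->tl >= p->hd)
        ? (size_t)(p->tl - p->hd)
        : (size_t)((p->lim - p->hd) + (p->tl - p->base));
}

/* Sweep hd..tl, copying live sparks down over the same region, dropping
 * the rest, then reset tl -- the shape of updateSparkQueue above.       */
static void prune_pool(ToyPool *p)
{
    ToySpark *sparkp = p->hd, *to_sparkp = p->hd;
    while (sparkp != p->tl) {
        if (toy_is_alive(sparkp)) {
            *to_sparkp++ = *sparkp;
            if (to_sparkp == p->lim) to_sparkp = p->base;    /* wrap */
        }
        sparkp++;
        if (sparkp == p->lim) sparkp = p->base;              /* wrap */
    }
    p->tl = to_sparkp;       /* everything beyond this point is gone */
}

int main(void)
{
    ToyPool p;
    int i;

    p.base = p.slots;
    p.lim  = p.slots + QUEUE_CAP;
    p.hd   = p.tl = p.base;

    for (i = 0; i < 5; i++) {            /* enqueue five sparks          */
        p.tl->payload = i;
        p.tl->alive   = (i % 2 == 0);    /* pretend the odd ones fizzled */
        if (++p.tl == p.lim) p.tl = p.base;
    }

    printf("before pruning: %zu sparks\n", pool_size(&p));   /* 5 */
    prune_pool(&p);
    printf("after pruning:  %zu sparks\n", pool_size(&p));   /* 3 */
    return 0;
}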
......@@ -9,12 +9,13 @@
#ifndef SPARKS_H
#define SPARKS_H
#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
#if defined(THREADED_RTS)
StgClosure * findSpark (Capability *cap);
void initSparkPools (void);
void freeSparkPool (StgSparkPool *pool);
void createSparkThread (Capability *cap, StgClosure *p);
void markSparkQueue (evac_fn evac, void *user, Capability *cap);
void updateSparkQueue (Capability *cap);
void traverseSparkQueue(evac_fn evac, void *user, Capability *cap);
INLINE_HEADER void discardSparks (StgSparkPool *pool);
INLINE_HEADER nat sparkPoolSize (StgSparkPool *pool);
......@@ -25,30 +26,6 @@ INLINE_HEADER nat sparkPoolSizeCap (Capability *cap);
INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
#endif
#if defined(PARALLEL_HASKELL)
StgTSO *activateSpark (rtsSpark spark) ;
rtsBool add_to_spark_queue( StgClosure *closure, StgSparkPool *pool );
void markSparkQueue( void );
nat spark_queue_len( StgSparkPool *pool );
void disposeSpark( StgClosure *spark );
#endif
#if defined(GRAN)
void findLocalSpark (rtsEvent *event, rtsBool *found_res, rtsSparkQ *spark_res);
rtsBool activateSpark (rtsEvent *event, rtsSparkQ spark);
rtsSpark *newSpark(StgClosure *node, nat name, nat gran_info,
nat size_info, nat par_info, nat local);
void add_to_spark_queue(rtsSpark *spark);
rtsSpark *delete_from_sparkq (rtsSpark *spark, PEs p, rtsBool dispose_too);
void disposeSpark(rtsSpark *spark);
void disposeSparkQ(rtsSparkQ spark);
void print_spark(rtsSpark *spark);
void print_sparkq(PEs proc);
void print_sparkq_stats(void);
nat spark_queue_len(PEs proc);
void markSparkQueue(void);
#endif
/* -----------------------------------------------------------------------------
* PRIVATE below here
* -------------------------------------------------------------------------- */
......
......@@ -966,6 +966,9 @@ compact(StgClosure *static_objects)
// 1. thread the roots
markCapabilities((evac_fn)thread_root, NULL);
// spark queues
traverseSparkQueues((evac_fn)thread_root, NULL);
// the weak pointer lists...
if (weak_ptr_list != NULL) {
thread((void *)&weak_ptr_list);
......
......@@ -39,7 +39,6 @@
#include "Trace.h"
#include "RetainerProfile.h"
#include "RaiseAsync.h"
#include "Sparks.h"
#include "Papi.h"
#include "GC.h"
......@@ -377,6 +376,9 @@ GarbageCollect ( rtsBool force_major_gc )
// Update pointers from the Task list
update_task_list();
// Update pointers from capabilities (probably just the spark queues)
updateCapabilitiesPostGC();
// Now see which stable names are still alive.
gcStablePtrTable();
......