Sparks.c 10.1 KB
Newer Older
1
/* ---------------------------------------------------------------------------
2
 *
3
 * (c) The GHC Team, 2000-2008
4
 *
thomie's avatar
thomie committed
5
 * Sparking support for THREADED_RTS version of the RTS.
6
 *
7
 -------------------------------------------------------------------------*/
8

9
#include "PosixSource.h"
10
#include "Rts.h"
Simon Marlow's avatar
Simon Marlow committed
11

12 13
#include "Schedule.h"
#include "RtsUtils.h"
Simon Marlow's avatar
Simon Marlow committed
14
#include "Trace.h"
15
#include "Prelude.h"
16
#include "Sparks.h"
17
#include "sm/HeapAlloc.h"
18

Simon Marlow's avatar
Simon Marlow committed
19
#if defined(THREADED_RTS)
20

21 22
SparkPool *
allocSparkPool( void )
23
{
24
    return newWSDeque(RtsFlags.ParFlags.maxLocalSparks);
25 26
}

Ian Lynagh's avatar
Ian Lynagh committed
27
/* Release a spark pool previously obtained from allocSparkPool().
 * The pool must not be in use by any capability when this is called. */
void
freeSparkPool (SparkPool *pool)
{
    freeWSDeque(pool);
}
32

33
/* -----------------------------------------------------------------------------
34
 *
35 36 37 38 39
 * Turn a spark into a real thread
 *
 * -------------------------------------------------------------------------- */

void
40
createSparkThread (Capability *cap)
41 42 43
{
    StgTSO *tso;

44
    tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize,
45
                          (StgClosure *)runSparks_closure);
46

47
    traceEventCreateSparkThread(cap, tso->id);
48

49 50 51
    appendToRunQueue(cap,tso);
}

52 53 54 55 56
/* --------------------------------------------------------------------------
 * newSpark: create a new spark, as a result of calling "par"
 * Called directly from STG.
 * -------------------------------------------------------------------------- */

57 58 59
/* Record a new spark, as a result of evaluating "par" (called directly
 * from STG code).  A spark that already looks evaluated is counted as a
 * dud; otherwise it is pushed onto this capability's pool, counting an
 * overflow if the deque is full.  Always returns 1. */
StgInt
newSpark (StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);

    if (fizzledSpark(p)) {
        // already evaluated: not worth sparking
        cap->spark_stats.dud++;
        traceEventSparkDud(cap);
        return 1;
    }

    if (pushWSDeque(cap->sparks, p)) {
        cap->spark_stats.created++;
        traceEventSparkCreate(cap);
    } else {
        /* overflowing the spark pool */
        cap->spark_stats.overflowed++;
        traceEventSparkOverflow(cap);
    }

    return 1;
}

80 81 82 83 84
/* --------------------------------------------------------------------------
 * Remove all sparks from the spark queues which should not spark any
 * more.  Called after GC. We assume exclusive access to the structure
 * and replace  all sparks in the queue, see explanation below. At exit,
 * the spark pool only contains sparkable closures.
85 86
 * -------------------------------------------------------------------------- */

87
/* Compact this capability's spark pool in place after GC, keeping only
 * sparks that are still worth running (following forwarding pointers to
 * the closures' new addresses) and discarding fizzled or GC'd entries.
 * Assumes exclusive access to the pool: no stealing may be in progress. */
void
pruneSparkQueue (Capability *cap)
{
    SparkPool *pool;
    StgClosurePtr spark, tmp, *elements;
    uint32_t n, pruned_sparks; // stats only
    StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
    const StgInfoTable *info;

    n = 0;
    pruned_sparks = 0;

    pool = cap->sparks;

    // it is possible that top > bottom, indicating an empty pool.  We
    // fix that here; this is only necessary because the loop below
    // assumes it.
    if (pool->top > pool->bottom)
        pool->top = pool->bottom;

    // Take this opportunity to reset top/bottom modulo the size of
    // the array, to avoid overflow.  This is only possible because no
    // stealing is happening during GC.
    pool->bottom  -= pool->top & ~pool->moduloSize;
    pool->top     &= pool->moduloSize;
    pool->topBound = pool->top;

    debugTrace(DEBUG_sparks,
               "markSparkQueue: current spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);

    elements = (StgClosurePtr *)pool->elements;

    /* We have exclusive access to the structure here, so we can reset
       bottom and top counters, and prune invalid sparks. Contents are
       copied in-place if they are valuable, otherwise discarded. The
       routine uses "real" indices t and b, starts by computing them
       as the modulus size of top and bottom,

       Copying:

       At the beginning, the pool structure can look like this:
       ( bottom % size >= top % size , no wrap-around)
                  t          b
       ___________***********_________________

       or like this ( bottom % size < top % size, wrap-around )
                  b         t
       ***********__________******************
       As we need to remove useless sparks anyway, we make one pass
       between t and b, moving valuable content to b and subsequent
       cells (wrapping around when the size is reached).

                     b      t
       ***********OOO_______XX_X__X?**********
                     ^____move?____/

       After this movement, botInd becomes the new bottom, and old
       bottom becomes the new top index, both as indices in the array
       size range.
    */
    // starting here
    currInd = (pool->top) & (pool->moduloSize); // mod

    // copies of evacuated closures go to space from botInd on
    // we keep oldBotInd to know when to stop
    oldBotInd = botInd = (pool->bottom) & (pool->moduloSize); // mod

    // on entry to loop, we are within the bounds
    ASSERT( currInd < pool->size && botInd  < pool->size );

    while (currInd != oldBotInd ) {
      /* must use != here, wrap-around at size
         subtle: loop not entered if queue empty
       */

      /* check element at currInd. if valuable, evacuate and move to
         botInd, otherwise move on */
      spark = elements[currInd];

      // We have to be careful here: in the parallel GC, another
      // thread might evacuate this closure while we're looking at it,
      // so grab the info pointer just once.
      if (GET_CLOSURE_TAG(spark) != 0) {
          // Tagged pointer is a value, so the spark has fizzled.  It
          // probably never happens that we get a tagged pointer in
          // the spark pool, because we would have pruned the spark
          // during the previous GC cycle if it turned out to be
          // evaluated, but it doesn't hurt to have this check for
          // robustness.
          pruned_sparks++;
          cap->spark_stats.fizzled++;
          traceEventSparkFizzle(cap);
      } else {
          info = spark->header.info;
          if (IS_FORWARDING_PTR(info)) {
              // closure was evacuated: its new location is behind the
              // forwarding pointer
              tmp = (StgClosure*)UN_FORWARDING_PTR(info);
              /* if valuable work: shift inside the pool */
              if (closure_SHOULD_SPARK(tmp)) {
                  elements[botInd] = tmp; // keep entry (new address)
                  botInd++;
                  n++;
              } else {
                  pruned_sparks++; // discard spark
                  cap->spark_stats.fizzled++;
                  traceEventSparkFizzle(cap);
              }
          } else if (HEAP_ALLOCED(spark)) {
              if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
                  if (closure_SHOULD_SPARK(spark)) {
                      elements[botInd] = spark; // keep entry (new address)
                      botInd++;
                      n++;
                  } else {
                      pruned_sparks++; // discard spark
                      cap->spark_stats.fizzled++;
                      traceEventSparkFizzle(cap);
                  }
              } else {
                  // not evacuated: the closure is dead and was
                  // collected by this GC
                  pruned_sparks++; // discard spark
                  cap->spark_stats.gcd++;
                  traceEventSparkGC(cap);
              }
          } else {
              // not heap-allocated: a static closure
              if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
                  // We can't tell whether a THUNK_STATIC is garbage or not.
                  // See also Note [STATIC_LINK fields]
                  // isAlive() also ignores static closures (see GCAux.c)
                  elements[botInd] = spark; // keep entry (new address)
                  botInd++;
                  n++;
              } else {
                  pruned_sparks++; // discard spark
                  cap->spark_stats.fizzled++;
                  traceEventSparkFizzle(cap);
              }
          }
      }

      currInd++;

      // in the loop, we may reach the bounds, and instantly wrap around
      ASSERT( currInd <= pool->size && botInd <= pool->size );
      if ( currInd == pool->size ) { currInd = 0; }
      if ( botInd == pool->size )  { botInd = 0;  }

    } // while-loop over spark pool elements

    ASSERT(currInd == oldBotInd);

    pool->top = oldBotInd; // where we started writing
    pool->topBound = pool->top;

    pool->bottom = (oldBotInd <= botInd) ? botInd : (botInd + pool->size);
    // first free place we did not use (corrected by wraparound)

    debugTrace(DEBUG_sparks, "pruned %d sparks", pruned_sparks);

    debugTrace(DEBUG_sparks,
               "new spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);
}

254 255
/* GC for the spark pool, called inside Capability.c for all
   capabilities in turn. Blindly "evac"s complete spark pool. */
256 257 258 259
/* Treat every occupied slot of this capability's spark pool as a GC
 * root: apply "evac" to each cell in the logical range [top, bottom),
 * mapping logical indices onto array cells with the modulo mask.
 * In GHC-6.10, evac takes an additional 1st argument to hold a
 * GC-specific register, see rts/sm/GC.c::mark_root(). */
void
traverseSparkQueue (evac_fn evac, void *user, Capability *cap)
{
    SparkPool *pool = cap->sparks;

    ASSERT_WSDEQUE_INVARIANTS(pool);

    StgClosure **slots = (StgClosurePtr*)pool->elements;
    StgWord mask = pool->moduloSize;
    StgWord idx;

    for (idx = pool->top; idx < pool->bottom; idx++) {
        evac( user , slots + (idx & mask) );
    }

    debugTrace(DEBUG_sparks,
               "traversed spark queue, len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);
}

/* ----------------------------------------------------------------------------
 * balanceSparkPoolsCaps: takes an array of capabilities (usually: all
 * capabilities) and its size. Accesses all spark pools and equally
 * distributes the sparks among them.
 *
291
 * Could be called after GC, before Cap. release, from scheduler.
292
 * -------------------------------------------------------------------------- */
293
/* Forward declaration carrying the noreturn attribute: the stub below
 * unconditionally barfs, so callers never see it return. */
void balanceSparkPoolsCaps(uint32_t n_caps, Capability caps[])
   GNUC3_ATTRIBUTE(__noreturn__);

/* Unimplemented stub: spark-pool balancing across capabilities is not
 * supported; aborts the RTS if ever invoked. */
void balanceSparkPoolsCaps(uint32_t n_caps STG_UNUSED,
                           Capability caps[] STG_UNUSED) {
  barf("not implemented");
}

301 302 303
#else

StgInt
Simon Marlow's avatar
Simon Marlow committed
304
newSpark (StgRegTable *reg STG_UNUSED, StgClosure *p STG_UNUSED)
305 306 307 308 309
{
    /* nothing */
    return 1;
}

Simon Marlow's avatar
Simon Marlow committed
310
#endif /* THREADED_RTS */