/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2008
 *
 * Storage manager front end
 *
 * Documentation on the architecture of the Storage Manager can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "Stats.h"
#include "BlockAlloc.h"
#include "Weak.h"
#include "Sanity.h"
#include "Arena.h"
#include "Capability.h"
#include "Schedule.h"
#include "RetainerProfile.h"	// for counting memory blocks (memInventory)
#include "OSMem.h"
#include "Trace.h"
#include "GC.h"
#include "Evac.h"

#include <string.h>

#include "ffi.h"

/*
 * All these globals require sm_mutex to access in THREADED_RTS mode.
 */
StgClosure    *caf_list         = NULL;
StgClosure    *revertible_caf_list = NULL;
rtsBool       keepCAFs;

nat alloc_blocks_lim;    /* GC if n_large_blocks in any nursery
                          * reaches this. */

bdescr *exec_block;

generation *generations = NULL;	/* all the generations */
generation *g0		= NULL; /* generation 0, for convenience */
generation *oldest_gen  = NULL; /* oldest generation, for convenience */

ullong total_allocated = 0;	/* total memory allocated during run */

nursery *nurseries = NULL;     /* array of nurseries, size == n_capabilities */

#ifdef THREADED_RTS
/*
 * Storage manager mutex:  protects all the above state from
 * simultaneous access by two STG threads.
 */
Mutex sm_mutex;
#endif

static void allocNurseries ( void );

static void
initGeneration (generation *gen, int g)
{
    gen->no = g;
    gen->collections = 0;
    gen->par_collections = 0;
    gen->failed_promotions = 0;
    gen->max_blocks = 0;
    gen->blocks = NULL;
    gen->n_blocks = 0;
    gen->n_words = 0;
    gen->live_estimate = 0;
    gen->old_blocks = NULL;
    gen->n_old_blocks = 0;
    gen->large_objects = NULL;
    gen->n_large_blocks = 0;
    gen->n_new_large_blocks = 0;
    gen->mut_list = allocBlock();
    gen->scavenged_large_objects = NULL;
    gen->n_scavenged_large_blocks = 0;
    gen->mark = 0;
    gen->compact = 0;
    gen->bitmap = NULL;
#ifdef THREADED_RTS
    initSpinLock(&gen->sync_large_objects);
#endif
    gen->threads = END_TSO_QUEUE;
    gen->old_threads = END_TSO_QUEUE;
}

void
initStorage( void )
{
  nat g;

  if (generations != NULL) {
      // multi-init protection
      return;
  }

  initMBlocks();

  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
   * doing something reasonable.
   */
  /* We use the NOT_NULL variant or gcc warns that the test is always true */
  ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLACKHOLE_info));
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));

  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.heapSizeSuggestion >
      RtsFlags.GcFlags.maxHeapSize) {
    RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
  }

  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.minAllocAreaSize >
      RtsFlags.GcFlags.maxHeapSize) {
      errorBelch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
      RtsFlags.GcFlags.minAllocAreaSize = RtsFlags.GcFlags.maxHeapSize;
  }

  initBlockAllocator();

#if defined(THREADED_RTS)
  initMutex(&sm_mutex);
#endif

  ACQUIRE_SM_LOCK;

  /* allocate generation info array */
  generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
					     * sizeof(struct generation_),
					     "initStorage: gens");

  /* Initialise all generations */
  for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
      initGeneration(&generations[g], g);
  }

  /* A couple of convenience pointers */
  g0 = &generations[0];
  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];

  nurseries = stgMallocBytes(n_capabilities * sizeof(struct nursery_),
                             "initStorage: nurseries");

  /* Set up the destination pointers in each younger gen. step */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
      generations[g].to = &generations[g+1];
  }
  oldest_gen->to = oldest_gen;

  /* The oldest generation has one step. */
  if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
      if (RtsFlags.GcFlags.generations == 1) {
	  errorBelch("WARNING: compact/sweep is incompatible with -G1; disabled");
      } else {
	  oldest_gen->mark = 1;
          if (RtsFlags.GcFlags.compact)
              oldest_gen->compact = 1;
      }
  }

  generations[0].max_blocks = 0;

  /* The allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
   * size.  Reason: we're going to do a major collection first, and we
   * don't want it to be a big one.  This vague idea is borne out by
   * rigorous experimental evidence.
   */
  allocNurseries();

  weak_ptr_list = NULL;
  caf_list = NULL;
  revertible_caf_list = NULL;

  /* initialise the allocate() interface */
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  exec_block = NULL;

#ifdef THREADED_RTS
  initSpinLock(&gc_alloc_block_sync);
  whitehole_spin = 0;
#endif

  N = 0;

  initGcThreads();

  IF_DEBUG(gc, statDescribeGens());

  RELEASE_SM_LOCK;
}

void
exitStorage (void)
{
    stat_exit(calcAllocated());
}

void
freeStorage (void)
{
    stgFree(generations);
    freeAllMBlocks();
#if defined(THREADED_RTS)
    closeMutex(&sm_mutex);
#endif
    stgFree(nurseries);
    freeGcThreads();
}

/* -----------------------------------------------------------------------------
   CAF management.

   The entry code for every CAF does the following:

      - builds a CAF_BLACKHOLE in the heap
      - pushes an update frame pointing to the CAF_BLACKHOLE
      - invokes UPD_CAF(), which:
          - calls newCaf, below
          - updates the CAF with a static indirection to the CAF_BLACKHOLE

   Why do we build a BLACKHOLE in the heap rather than just updating
   the thunk directly?  It's so that we only need one kind of update
   frame - otherwise we'd need a static version of the update frame too.

   newCaf() does the following:

      - it puts the CAF on the oldest generation's mut-once list.
        This is so that we can treat the CAF as a root when collecting
        younger generations.

   For GHCI, we have additional requirements when dealing with CAFs:

      - we must *retain* all dynamically-loaded CAFs ever entered,
        just in case we need them again.
      - we must be able to *revert* CAFs that have been evaluated, to
        their pre-evaluated form.

      To do this, we use an additional CAF list.  When newCaf() is
      called on a dynamically-loaded CAF, we add it to the CAF list
      instead of the old-generation mutable list, and save away its
      old info pointer (in caf->saved_info) for later reversion.

      To revert all the CAFs, we traverse the CAF list and reset the
      info pointer to caf->saved_info, then throw away the CAF list.
      (see GC.c:revertCAFs()).

      -- SDM 29/1/01

   -------------------------------------------------------------------------- */

void
newCAF(StgClosure* caf)
{
  ACQUIRE_SM_LOCK;

#ifdef DYNAMIC
  if(keepCAFs)
  {
    // HACK:
    // If we are in GHCi _and_ we are using dynamic libraries,
    // then we can't redirect newCAF calls to newDynCAF (see below),
    // so we make newCAF behave almost like newDynCAF.
    // The dynamic libraries might be used by both the interpreted
    // program and GHCi itself, so they must not be reverted.
    // This also means that in GHCi with dynamic libraries, CAFs are not
    // garbage collected. If this turns out to be a problem, we could
    // do another hack here and do an address range test on caf to figure
    // out whether it is from a dynamic library.
    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = caf_list;
    caf_list = caf;
  }
  else
#endif
  {
    /* Put this CAF on the mutable list for the old generation.
     * This is a HACK - the IND_STATIC closure doesn't really have
     * a mut_link field, but we pretend it has - in fact we re-use
     * the STATIC_LINK field for the time being, because when we
     * come to do a major GC we won't need the mut_link field
     * any more and can use it as a STATIC_LINK.
     */
    ((StgIndStatic *)caf)->saved_info = NULL;
    recordMutableGen(caf, oldest_gen->no);
  }

  RELEASE_SM_LOCK;
}

// An alternate version of newCaf which is used for dynamically loaded
// object code in GHCi.  In this case we want to retain *all* CAFs in
// the object code, because they might be demanded at any time from an
// expression evaluated on the command line.
//
// Also, GHCi might want to revert CAFs, so we add these to the
// revertible_caf_list.
//
// The linker hackily arranges that references to newCaf from dynamic
// code end up pointing to newDynCAF.
void
newDynCAF(StgClosure *caf)
{
    ACQUIRE_SM_LOCK;

    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = revertible_caf_list;
    revertible_caf_list = caf;

    RELEASE_SM_LOCK;
}
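
/* Illustrative sketch only -- the real reversion code is revertCAFs() in
 * GC.c, as referenced in the comment above.  Reverting the
 * dynamically-loaded CAFs amounts to walking revertible_caf_list and
 * restoring each closure's saved info pointer, roughly:
 *
 *     StgIndStatic *c;
 *     for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
 *          c = (StgIndStatic *)c->static_link) {
 *         SET_INFO(c, c->saved_info);
 *         c->saved_info = NULL;
 *     }
 */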

/* -----------------------------------------------------------------------------
   Nursery management.
   -------------------------------------------------------------------------- */

static bdescr *
allocNursery (bdescr *tail, nat blocks)
{
    bdescr *bd;
    nat i;

    // Allocate a nursery: we allocate fresh blocks one at a time and
    // cons them on to the front of the list, not forgetting to update
    // the back pointer on the tail of the list to point to the new block.
    for (i=0; i < blocks; i++) {
	// @LDV profiling
	/*
	  processNursery() in LdvProfile.c assumes that every block group in
	  the nursery contains only a single block. So, if a block group is
	  given multiple blocks, change processNursery() accordingly.
	*/
	bd = allocBlock();
	bd->link = tail;
	// double-link the nursery: we might need to insert blocks
	if (tail != NULL) {
	    tail->u.back = bd;
	}
        initBdescr(bd, g0, g0);
	bd->flags = 0;
	bd->free = bd->start;
	tail = bd;
    }
    tail->u.back = NULL;
    return tail;
}

static void
assignNurseriesToCapabilities (void)
{
    nat i;

    for (i = 0; i < n_capabilities; i++) {
	capabilities[i].r.rNursery        = &nurseries[i];
	capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
	capabilities[i].r.rCurrentAlloc   = NULL;
    }
}

static void
allocNurseries( void )
{
    nat i;

    for (i = 0; i < n_capabilities; i++) {
	nurseries[i].blocks =
            allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
	nurseries[i].n_blocks =
            RtsFlags.GcFlags.minAllocAreaSize;
    }
    assignNurseriesToCapabilities();
}

void
resetNurseries( void )
{
    nat i;
    bdescr *bd;

    for (i = 0; i < n_capabilities; i++) {
	for (bd = nurseries[i].blocks; bd; bd = bd->link) {
	    bd->free = bd->start;
	    ASSERT(bd->gen_no == 0);
	    ASSERT(bd->gen == g0);
	    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
	}
    }
    assignNurseriesToCapabilities();
}

lnat
countNurseryBlocks (void)
{
    nat i;
    lnat blocks = 0;

    for (i = 0; i < n_capabilities; i++) {
	blocks += nurseries[i].n_blocks;
    }
    return blocks;
}

static void
resizeNursery ( nursery *nursery, nat blocks )
{
  bdescr *bd;
  nat nursery_blocks;

  nursery_blocks = nursery->n_blocks;
  if (nursery_blocks == blocks) return;

  if (nursery_blocks < blocks) {
      debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
		 blocks);
      nursery->blocks = allocNursery(nursery->blocks, blocks-nursery_blocks);
  }
  else {
      bdescr *next_bd;

      debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
		 blocks);

      bd = nursery->blocks;
      while (nursery_blocks > blocks) {
	  next_bd = bd->link;
	  next_bd->u.back = NULL;
	  nursery_blocks -= bd->blocks; // might be a large block
	  freeGroup(bd);
	  bd = next_bd;
      }
      nursery->blocks = bd;
      // might have gone just under, by freeing a large block, so make
      // up the difference.
      if (nursery_blocks < blocks) {
	  nursery->blocks = allocNursery(nursery->blocks, blocks-nursery_blocks);
      }
  }

  nursery->n_blocks = blocks;
  ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks);
}

//
// Resize each of the nurseries to the specified size.
//
void
resizeNurseriesFixed (nat blocks)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
	resizeNursery(&nurseries[i], blocks);
    }
}

//
// Resize the nurseries to the total specified size.
//
void
resizeNurseries (nat blocks)
{
    // If there are multiple nurseries, then we just divide the number
    // of available blocks between them.
    resizeNurseriesFixed(blocks / n_capabilities);
}
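
/* Note (clarifying the division above): blocks/n_capabilities is integer
 * division, so up to n_capabilities-1 of the requested blocks are simply
 * dropped.  For example, resizeNurseries(1026) with 4 capabilities gives
 * each nursery 256 blocks, 1024 in total.
 */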

/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer... 
    diff = (StgPtr)dest - (StgPtr)src; // In *words* 
    dest->sp = (StgPtr)dest->sp + diff;
}

/* -----------------------------------------------------------------------------
   split N blocks off the front of the given bdescr, returning the
   new block group.  We add the remainder to the large_blocks list
   in the same step as the original block.
   -------------------------------------------------------------------------- */

bdescr *
splitLargeBlock (bdescr *bd, nat blocks)
{
    bdescr *new_bd;

    ACQUIRE_SM_LOCK;

    ASSERT(countBlocks(bd->gen->large_objects) == bd->gen->n_large_blocks);

    // subtract the original number of blocks from the counter first
    bd->gen->n_large_blocks -= bd->blocks;

    new_bd = splitBlockGroup (bd, blocks);
    initBdescr(new_bd, bd->gen, bd->gen->to);
    new_bd->flags   = BF_LARGE | (bd->flags & BF_EVACUATED);
    // if new_bd is in an old generation, we have to set BF_EVACUATED
    new_bd->free    = bd->free;
    dbl_link_onto(new_bd, &bd->gen->large_objects);

    ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);

    // add the new number of blocks to the counter.  Due to the gaps
    // for block descriptors, new_bd->blocks + bd->blocks might not be
    // equal to the original bd->blocks, which is why we do it this way.
    bd->gen->n_large_blocks += bd->blocks + new_bd->blocks;

    ASSERT(countBlocks(bd->gen->large_objects) == bd->gen->n_large_blocks);

    RELEASE_SM_LOCK;

    return new_bd;
}

/* -----------------------------------------------------------------------------
   allocate()

   This allocates memory in the current thread - it is intended for
   use primarily from STG-land where we have a Capability.  It doesn't
   require taking the sm_mutex lock in the common case.

   Memory is allocated directly from the nursery if possible (but not
   from the current nursery block, so as not to interfere with
   Hp/HpLim).
   -------------------------------------------------------------------------- */

StgPtr
allocate (Capability *cap, lnat n)
{
    bdescr *bd;
    StgPtr p;

    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	lnat req_blocks =  (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;

        // Attempting to allocate an object larger than maxHeapSize
        // should definitely be disallowed.  (bug #1791)
        if (RtsFlags.GcFlags.maxHeapSize > 0 &&
            req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
            heapOverflow();
            // heapOverflow() doesn't exit (see #2592), but we aren't
            // in a position to do a clean shutdown here: we
            // either have to allocate the memory or exit now.
            // Allocating the memory would be bad, because the user
            // has requested that we not exceed maxHeapSize, so we
            // just exit.
	    stg_exit(EXIT_HEAPOVERFLOW);
        }

        ACQUIRE_SM_LOCK
	bd = allocGroup(req_blocks);
	dbl_link_onto(bd, &g0->large_objects);
	g0->n_large_blocks += bd->blocks; // might be larger than req_blocks
	g0->n_new_large_blocks += bd->blocks;
        RELEASE_SM_LOCK;
        initBdescr(bd, g0, g0);
	bd->flags = BF_LARGE;
	bd->free = bd->start + n;
	return bd->start;
    }

    /* small allocation (<LARGE_OBJECT_THRESHOLD) */

    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);

    bd = cap->r.rCurrentAlloc;
    if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {

        // The CurrentAlloc block is full, we need to find another
        // one.  First, we try taking the next block from the
        // nursery:
        bd = cap->r.rCurrentNursery->link;

        if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
            // The nursery is empty, or the next block is already
            // full: allocate a fresh block (we can't fail here).
            ACQUIRE_SM_LOCK;
            bd = allocBlock();
            cap->r.rNursery->n_blocks++;
            RELEASE_SM_LOCK;
            initBdescr(bd, g0, g0);
            bd->flags = 0;
            // If we had to allocate a new block, then we'll GC
            // pretty quickly now, because MAYBE_GC() will
            // notice that CurrentNursery->link is NULL.
        } else {
            // we have a block in the nursery: take it and put
            // it at the *front* of the nursery list, and use it
            // to allocate() from.
            cap->r.rCurrentNursery->link = bd->link;
            if (bd->link != NULL) {
                bd->link->u.back = cap->r.rCurrentNursery;
            }
        }
        dbl_link_onto(bd, &cap->r.rNursery->blocks);
        cap->r.rCurrentAlloc = bd;
        IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
    }
    p = bd->free;
    bd->free += n;
    return p;
}
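
/* Illustrative use (a hedged sketch, not a call site from this file): an
 * RTS routine or primop that holds a Capability allocates an object and
 * then fills in its header itself, e.g.
 *
 *     StgPtr p = allocate(cap, sizeofW(StgMutVar));
 *     SET_HDR((StgClosure *)p, &stg_MUT_VAR_DIRTY_info, CCS_SYSTEM);
 *     ((StgMutVar *)p)->var = init;   // 'init' is a placeholder value
 *
 * allocate() itself does no initialisation beyond setting up the block
 * descriptor, so the caller must write a valid header before the next GC.
 */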

/* ---------------------------------------------------------------------------
   Allocate a fixed/pinned object.

   We allocate small pinned objects into a single block, allocating a
   new block when the current one overflows.  The block is chained
   onto the large_objects list of generation 0.

   NOTE: The GC can't in general handle pinned objects.  This
   interface is only safe to use for ByteArrays, which have no
   pointers and don't require scavenging.  It works because the
   block's descriptor has the BF_LARGE flag set, so the block is
   treated as a large object and chained onto various lists, rather
   than the individual objects being copied.  However, when it comes
   to scavenge the block, the GC will only scavenge the first object.
   The reason is that the GC can't linearly scan a block of pinned
   objects at the moment (doing so would require using the
   mostly-copying techniques).  But since we're restricting ourselves
   to pinned ByteArrays, not scavenging is ok.

   This function is called by newPinnedByteArray# which immediately
   fills the allocated memory with a MutableByteArray#.
   ------------------------------------------------------------------------- */

StgPtr
allocatePinned (Capability *cap, lnat n)
{
    StgPtr p;
    bdescr *bd;

    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	p = allocate(cap, n);
        Bdescr(p)->flags |= BF_PINNED;
        return p;
    }

    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);

    bd = cap->pinned_object_block;

    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, allocate a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
        ACQUIRE_SM_LOCK;
	cap->pinned_object_block = bd = allocBlock();
	dbl_link_onto(bd, &g0->large_objects);
	g0->n_large_blocks++;
	g0->n_new_large_blocks++;
        RELEASE_SM_LOCK;
        initBdescr(bd, g0, g0);
	bd->flags  = BF_PINNED | BF_LARGE;
	bd->free   = bd->start;
    }

    p = bd->free;
    bd->free += n;
    return p;
}
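
/* Illustrative use (sketch only; the real caller is newPinnedByteArray#
 * in the compiled primop code, as noted above): a pinned ByteArray#
 * allocation looks roughly like
 *
 *     StgPtr p = allocatePinned(cap, sizeofW(StgArrWords) + payload_words);
 *     SET_HDR((StgClosure *)p, &stg_ARR_WORDS_info, CCCS);
 *
 * followed by filling in the array's size field; the exact details live
 * in the primop code, not here.
 */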

/* -----------------------------------------------------------------------------
   Write Barriers
   -------------------------------------------------------------------------- */

/*
   This is the write barrier for MUT_VARs, a.k.a. IORefs.  A
   MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
   is.  When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
   and is put on the mutable list.
*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    bdescr *bd;
    if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
	p->header.info = &stg_MUT_VAR_DIRTY_info;
	bd = Bdescr((StgPtr)p);
	if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
    }
}

// Setting a TSO's link field with a write barrier.
// It is *not* necessary to call this function when
//    * setting the link field to END_TSO_QUEUE
//    * putting a TSO on the blackhole_queue
//    * setting the link field of the currently running TSO, as it
//      will already be dirty.
void
setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
{
    bdescr *bd;
    if (tso->dirty == 0 && (tso->flags & TSO_LINK_DIRTY) == 0) {
        tso->flags |= TSO_LINK_DIRTY;
	bd = Bdescr((StgPtr)tso);
	if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
    }
    tso->_link = target;
}

void
dirty_TSO (Capability *cap, StgTSO *tso)
{
    bdescr *bd;
    if (tso->dirty == 0 && (tso->flags & TSO_LINK_DIRTY) == 0) {
	bd = Bdescr((StgPtr)tso);
	if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
    }
    tso->dirty = 1;
}

/*
   This is the write barrier for MVARs.  An MVAR_CLEAN object is not
   on the mutable list; an MVAR_DIRTY is.  When written to, an
   MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
   The check for MVAR_CLEAN is inlined at the call site for speed,
   this really does make a difference on concurrency-heavy benchmarks
   such as Chameneos and cheap-concurrency.
*/
void
dirty_MVAR(StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    bdescr *bd;
    bd = Bdescr((StgPtr)p);
    if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
}
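
/* For illustration (a sketch; the actual call sites are in the compiled
 * primop code, not here): the caller performs the MVAR_CLEAN check inline
 * and only enters the RTS when the MVar really was clean, roughly:
 *
 *     if (mv->header.info == &stg_MVAR_CLEAN_info) {
 *         mv->header.info = &stg_MVAR_DIRTY_info;
 *         dirty_MVAR(reg, (StgClosure *)mv);
 *     }
 */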

/* -----------------------------------------------------------------------------
 * Stats and stuff
 * -------------------------------------------------------------------------- */

/* -----------------------------------------------------------------------------
 * calcAllocated()
 *
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
 * This leaves a little slop at the end of each block.
 * -------------------------------------------------------------------------- */

lnat
calcAllocated( void )
{
  nat allocated;
  bdescr *bd;
  nat i;

  allocated = countNurseryBlocks() * BLOCK_SIZE_W;

  for (i = 0; i < n_capabilities; i++) {
      Capability *cap;
      for ( bd = capabilities[i].r.rCurrentNursery->link;
	    bd != NULL; bd = bd->link ) {
	  allocated -= BLOCK_SIZE_W;
      }
      cap = &capabilities[i];
      if (cap->r.rCurrentNursery->free <
	  cap->r.rCurrentNursery->start + BLOCK_SIZE_W) {
	  allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
	      - cap->r.rCurrentNursery->free;
      }
      if (cap->pinned_object_block != NULL) {
          allocated -= (cap->pinned_object_block->start + BLOCK_SIZE_W) -
              cap->pinned_object_block->free;
      }
  }

  allocated += g0->n_new_large_blocks * BLOCK_SIZE_W;

  total_allocated += allocated;
  return allocated;
}
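
/* Worked example (illustrative): with one capability, a 512-block nursery,
 * 100 still-unused blocks after the current nursery block, 10 words left
 * unused in the current block, and 3 new large-object blocks this period,
 * the estimate is 512*BLOCK_SIZE_W - 100*BLOCK_SIZE_W - 10 + 3*BLOCK_SIZE_W
 * words: filled nursery blocks are counted whole (hence the slop mentioned
 * above), plus large-object allocation since the last report.
 */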

/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
lnat calcLiveBlocks (void)
{
  nat g;
  lnat live = 0;
  generation *gen;

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      /* approximate amount of live data (doesn't take into account slop
       * at end of each block).
       */
      gen = &generations[g];
      live += gen->n_large_blocks + gen->n_blocks;
  }
  return live;
}

lnat countOccupied (bdescr *bd)
{
    lnat words;

    words = 0;
    for (; bd != NULL; bd = bd->link) {
        ASSERT(bd->free <= bd->start + bd->blocks * BLOCK_SIZE_W);
        words += bd->free - bd->start;
    }
    return words;
}

// Return an accurate count of the live data in the heap, excluding
// generation 0.
lnat calcLiveWords (void)
{
    nat g;
    lnat live;
    generation *gen;

    live = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen = &generations[g];
        live += gen->n_words + countOccupied(gen->large_objects);
    }
    return live;
}

/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Generations
 * that will be collected next time will therefore need twice as many
 * blocks since all the data will be copied.
 */
extern lnat
calcNeeded(void)
{
    lnat needed = 0;
    nat g;
    generation *gen;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen = &generations[g];

        // we need at least this much space
        needed += gen->n_blocks + gen->n_large_blocks;

        // any additional space needed to collect this gen next time?
        if (g == 0 || // always collect gen 0
            (gen->n_blocks + gen->n_large_blocks > gen->max_blocks)) {
            // we will collect this gen next time
            if (gen->mark) {
                //  bitmap:
                needed += gen->n_blocks / BITS_IN(W_);
                //  mark stack:
                needed += gen->n_blocks / 100;
            }
            if (gen->compact) {
                continue; // no additional space needed for compaction
            } else {
                needed += gen->n_blocks;
            }
        }
    }
    return needed;
}
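
/* Worked example (illustrative): suppose gen 1 holds 100 ordinary blocks
 * plus 20 large-object blocks and has grown past its max_blocks, so it
 * will be collected next time.  calcNeeded counts the existing 120 blocks,
 * plus another 100 as copy destination if gen 1 is a copying generation;
 * if it is compacted instead, only the bitmap (100 / BITS_IN(W_)) and
 * mark-stack (100 / 100) allowances are added.
 */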

/* ----------------------------------------------------------------------------
   Executable memory

   Executable memory must be managed separately from non-executable
   memory.  Most OSs these days require you to jump through hoops to
   dynamically allocate executable memory, due to various security
   measures.

   Here we provide a small memory allocator for executable memory.
   Memory is managed with a page granularity; we allocate linearly
   in the page, and when the page is emptied (all objects on the page
   are free) we free the page again, not forgetting to make it
   non-executable.

   TODO: The inability to handle objects bigger than BLOCK_SIZE_W means that
         the linker cannot use allocateExec for loading object code files
         on Windows. Once allocateExec can handle larger objects, the linker
         should be modified to use allocateExec instead of VirtualAlloc.
   ------------------------------------------------------------------------- */
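
/* Typical use (a hedged sketch; the real callers are elsewhere in the RTS,
 * e.g. the adjustor code): ask for a writable buffer plus its executable
 * alias, copy machine code into the writable copy, then hand out the
 * executable address:
 *
 *     void *exec_addr;
 *     void *rw_addr = allocateExec(code_size, &exec_addr);
 *     memcpy(rw_addr, code, code_size);
 *     // ... later, release it via the executable address:
 *     freeExec(exec_addr);
 *
 * (code and code_size are placeholders for the caller's machine code.)
 */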

#if defined(linux_HOST_OS)

// On Linux we need to use libffi for allocating executable memory,
// because it knows how to work around the restrictions put in place
// by SELinux.

void *allocateExec (nat bytes, void **exec_ret)
{
    void **ret, **exec;
    ACQUIRE_SM_LOCK;
    ret = ffi_closure_alloc (sizeof(void *) + (size_t)bytes, (void**)&exec);
    RELEASE_SM_LOCK;
    if (ret == NULL) return ret;
    *ret = ret; // save the address of the writable mapping, for freeExec().
    *exec_ret = exec + 1;
    return (ret + 1);
}

// freeExec gets passed the executable address, not the writable address.
void freeExec (void *addr)
{
    void *writable;
    writable = *((void**)addr - 1);
    ACQUIRE_SM_LOCK;
    ffi_closure_free (writable);
    RELEASE_SM_LOCK
}

#else

void *allocateExec (nat bytes, void **exec_ret)
{
    void *ret;
    nat n;

    ACQUIRE_SM_LOCK;

    // round up to words.
    n  = (bytes + sizeof(W_) + 1) / sizeof(W_);

    if (n+1 > BLOCK_SIZE_W) {
	barf("allocateExec: can't handle large objects");
    }

    if (exec_block == NULL ||
	exec_block->free + n + 1 > exec_block->start + BLOCK_SIZE_W) {
	bdescr *bd;
	lnat pagesize = getPageSize();
	bd = allocGroup(stg_max(1, pagesize / BLOCK_SIZE));
	debugTrace(DEBUG_gc, "allocate exec block %p", bd->start);
	bd->gen_no = 0;
	bd->flags = BF_EXEC;
	bd->link = exec_block;
	if (exec_block != NULL) {
	    exec_block->u.back = bd;
	}
	bd->u.back = NULL;
	setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
	exec_block = bd;
    }
    *(exec_block->free) = n;  // store the size of this chunk
    exec_block->gen_no += n;  // gen_no stores the number of words allocated
    ret = exec_block->free + 1;
    exec_block->free += n + 1;

    RELEASE_SM_LOCK
    *exec_ret = ret;
    return ret;
}

void freeExec (void *addr)
{
    StgPtr p = (StgPtr)addr - 1;
    bdescr *bd = Bdescr((StgPtr)p);

    if ((bd->flags & BF_EXEC) == 0) {
	barf("freeExec: not executable");
    }

    if (*(StgPtr)p == 0) {
	barf("freeExec: already free?");
    }

    ACQUIRE_SM_LOCK;

    bd->gen_no -= *(StgPtr)p;
    *(StgPtr)p = 0;

    if (bd->gen_no == 0) {
        // Free the block if it is empty, but not if it is the block at
        // the head of the queue.
        if (bd != exec_block) {
            debugTrace(DEBUG_gc, "free exec block %p", bd->start);
            dbl_link_remove(bd, &exec_block);
            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
            freeGroup(bd);
        } else {
            bd->free = bd->start;
        }
    }

    RELEASE_SM_LOCK
}

#endif /* linux_HOST_OS */

#ifdef DEBUG

// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr( StgPtr p );

bdescr *
_bdescr( StgPtr p )
{
    return Bdescr(p);
}

#endif