/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.41 1999/02/24 17:24:07 simonm Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "Sanity.h"
#include "GC.h"
#include "BlockAlloc.h"
#include "Main.h"
#include "DebugProf.h"
#include "Weak.h"
#include "StablePriv.h"

StgCAF* enteredCAFs;

/* STATIC OBJECT LIST.
 *
 * During GC:
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).  
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.  
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* WEAK POINTERS
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	/* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		/* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30;	/* percentage of g0s0 live at last minor GC */

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure *evacuate(StgClosure *q);
static void    zeroStaticObjectList(StgClosure* first_static);
static rtsBool traverse_weak_ptr_list(void);
static void    zeroMutableList(StgMutClosure *first);
static void    revertDeadCAFs(void);

static void           scavenge_stack(StgPtr p, StgPtr stack_end);
static void           scavenge_large(step *step);
static void           scavenge(step *step);
static void           scavenge_static(void);
static void           scavenge_mutable_list(generation *g);
static void           scavenge_mut_once_list(generation *g);

#ifdef DEBUG
static void gcCAFs(void);
#endif

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all 
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
	 generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.
      
     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */

void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }

  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }

  /* Figure out which generation to collect: N ends up as the oldest
   * generation whose step 0 has grown beyond its max_blocks limit.
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zeroMutableList below).
   */
  if (major_gc) { 
    zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { 
	continue; 
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen  = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp        = bd->start;
      step->hpLim     = step->hp + BLOCK_SIZE_W;
      step->hp_bd     = bd;
      step->to_space  = bd;
      step->to_blocks = 1;
      step->scan      = bd->start;
      step->scan_bd   = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
	bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
	bd = allocBlock();
	bd->gen = &generations[g];
	bd->step = step;
	bd->link = NULL;
	bd->evacuated = 0;	/* *not* a to-space block */
	step->hp = bd->start;
	step->hpLim = step->hp + BLOCK_SIZE_W;
	step->hp_bd = bd;
	step->blocks = bd;
	step->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  { 
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    /* Do the mut-once lists first */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  markWeakList();
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  { 
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in 
       * the CAF document.
       */
      extern void markHugsObjects(void);
#if 0
      /* ToDo: This (undefined) function should contain the scavenge
       * loop immediately below this block of code - but I'm not sure
       * enough of the details to do this myself.
       */
      scavengeEverything();
      /* revert dead CAFs and update enteredCAFs list */
      revertDeadCAFs();
#endif      
      markHugsObjects();
#if 0
      /* This will keep the CAFs and the attached BCOs alive 
       * but the values will have been reverted
       */
      scavengeEverything();
#endif
  }
#endif

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  { 
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    { 
      int gen, st; 
    loop2:
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
	for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) { 
	    continue; 
	  }
	  step = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	    scavenge(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (step->new_large_objects != NULL) {
	    scavenge_large(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Now see which stable names are still alive
   */
  gcStablePtrTable(major_gc);

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.  
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
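  /* Note: heapOverflow() below fires when, even after clamping the
   * old generation to maxHeapSize/2, less than pcFreeHeap% of that
   * budget would remain free (the /200 combines /2 for the clamp
   * with /100 for the percentage).
   */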
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks = 
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks - 
	     (int)oldest_gen->steps[0].to_blocks) < 
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }

  /* run through all the generations/steps and tidy up 
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp_bd->free;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(step->blocks);
	  step->blocks = step->to_space;
	  step->n_blocks = step->to_blocks;
	  step->to_space = NULL;
	  step->to_blocks = 0;
	  for (bd = step->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	step->large_objects = step->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =   oldgen_max_blocks * G
	 *                --------------------------------
	 *                 oldest generation number, i.e.
	 *                 RtsFlags.GcFlags.generations-1
	 */
	if (g != 0) {
#if 0
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
#endif
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}

      /* for older generations... */
      } else {
	
	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = step->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &step->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	step->n_blocks += step->to_blocks;
      }
    }
  }
  
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since these will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;
    
    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */
    
    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     * 
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.  
     */
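    /* Illustration (made-up numbers): with L = 1000 blocks of live
     * data and a nursery also of 1000 blocks, we touch nursery +
     * from-space + to-space = 3000 blocks, where a classical
     * two-space collector would touch about 4000 for the same
     * performance.
     */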
    blocks = g0s0->to_blocks;

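    /* The factor of 2 below is a rough allowance for the from-space
     * and to-space copies of the live data that coexist during a
     * collection.
     */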
    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > 
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free; 
      
      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
      
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
    
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */

    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded(); 	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
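      /* Worked example (illustrative numbers only): with a suggested
       * heap size of 6000 blocks, needed = 1000 and g0s0_pcnt_kept =
       * 30, we get (6000 - 1000) * 100 / 130 = 3846 blocks.
       */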
      blocks = 
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);
      
      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
      
      resizeNursery((nat)blocks);
    }
  }

  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();
  
  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif
  
  /* zero the scavenged static object list */
  if (major_gc) {
    zeroStaticObjectList(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
  }
  current_nursery = g0s0->blocks;

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);
  
  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

#ifdef DEBUG
  /* symbol-table based profiling */
  /*  heapCensus(to_space); */ /* ToDo */
#endif

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */

static rtsBool 
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
    }
    weak_done = rtsTrue;
  }

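  /* Returning rtsTrue is safe here: either we evacuated something
   * above, or weak_done is now set and the next call will return
   * rtsFalse.
   */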
  return rtsTrue;
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  StgInfoTable *info;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }
    
    switch (info->type) {
      
    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		/* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;
      
    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    default:
      /* dead. */
      return NULL;
    }
  }
}

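/* -----------------------------------------------------------------------------
   MarkRoot: evacuate a root closure and return its new address.
   -------------------------------------------------------------------------- */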
StgClosure *
MarkRoot(StgClosure *root)
{
  return evacuate(root);
}

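/* -----------------------------------------------------------------------------
   addBlock chains a fresh block onto a step's allocation area when
   the current block fills up.  Blocks for generations being
   collected (<= N) are marked as to-space blocks.
   -------------------------------------------------------------------------- */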
static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in 
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
883
884
  }

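  /* copy the object word by word: dest is the old hp, which we
   * return as the object's new address.
   */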
  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }
  
  dest = step->hp;
  step->hp += size_to_reserve;
  return (StgClosure *)dest;
}

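/* -----------------------------------------------------------------------------
   upd_evacuee overwrites the source object with an EVACUATED closure
   holding the forwarding address, so that later attempts to evacuate
   the same object simply return its new location.
   -------------------------------------------------------------------------- */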
static __inline__ void 
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  StgEvacuated *q = (StgEvacuated *)p;

  SET_INFO(q,&EVACUATED_info);
  q->evacuee = dest;
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

static inline void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);
  
  /* already evacuated? */
  if (bd->evacuated) { 
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }
  
  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgMutVar) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen 
           if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */


static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	/* nope */
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case BCO:
    to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);
    upd_evacuee(q,to);
    return to;

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
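    /* fall through */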
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    upd_evacuee(q,to);
    recordMutable((StgMutClosure *)to);
    return to;

  case STABLE_NAME:
    stable_ptr_table[((StgStableName *)q)->sn].keep = rtsTrue;
    to = copy(q,sizeofW(StgStableName),step);
    upd_evacuee(q,to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    to = copy(q,sizeofW(StgHeader)+1,step);
    upd_evacuee(q,to);
    return to;

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    to = copy(q,sizeofW(StgHeader)+2,step);
    upd_evacuee(q,to);
    return to;

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
  case WEAK:
  case FOREIGN:
    to = copy(q,sizeW_fromITBL(info),step);
    upd_evacuee(q,to);
    return to;

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
    upd_evacuee(q,to);
    return to;

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step); 
    upd_evacuee(q,to);
    recordMutable((StgMutClosure *)to);
    return to;

  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{ 
	  StgNat32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset < 
		 (StgNat32)(selectee_info->layout.payload.ptrs + 
		            selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (IS_USER_PTR(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = stgCast(StgInd *,selectee)->indirectee;
	goto selector_loop;

      case CAF_ENTERED:
	selectee = stgCast(StgCAF *,selectee)->value;
	goto selector_loop;

      case EVACUATED:
	selectee = stgCast(StgEvacuated*,selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee");
      }
    }
    to = copy(q,THUNK_SELECTOR_sizeW(),step);
    upd_evacuee(q,to);
    return to;

  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

    /* ToDo: optimise STATIC_LINK for known cases.
       - FUN_STATIC       : payload[0]
       - THUNK_STATIC     : payload[1]
       - IND_STATIC       : payload[1]
    */
  case THUNK_STATIC:
  case FUN_STATIC:
    if (info->srt_len == 0) {	/* small optimisation */
      return q;
    }
    /* fall through */
  case CONSTR_STATIC:
  case IND_STATIC:
    /* don't want to evacuate these, but we do want to follow pointers
     * from SRTs  - see scavenge_static.
     */

    /* put the object on the static list, if necessary.
     */
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    /* fall through */

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame\n");

  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
       tagging and all. */
    to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
    upd_evacuee(q,to);
    return to;

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't 
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	/*	fprintf(stderr,"evac failed!\n");*/
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q)); 

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	upd_evacuee(q,to);
	return to;
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q)); 

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	upd_evacuee(q,to);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.  
       */
      } else {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;
	new_tso->splim = (StgPtr)new_tso->splim + diff;
	
	relocate_TSO(tso, new_tso);
	upd_evacuee(q,(StgClosure *)new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }

  case BLOCKED_FETCH:
  case FETCH_ME:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
    return q;

  default:
    barf("evacuate: strange closure type");
  }

  barf("evacuate");
}

/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {
   
      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO");
    }
    break;
  }

  return dest;
}

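/* -----------------------------------------------------------------------------
   scavenge_srt: evacuate every closure reachable from an info
   table's SRT (static reference table).
   -------------------------------------------------------------------------- */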
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.  

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.  
   -------------------------------------------------------------------------- */
   

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects 
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info -> type) {

    case BCO:
      {
	StgBCO* bco = stgCast(StgBCO*,p);
	nat i;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	p += bco_sizeW(bco);
	break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      { 
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }
