/* -----------------------------------------------------------------------------
 * $Id: Schedule.c,v 1.13 1999/03/02 20:04:03 sof Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * Scheduler
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "SchedAPI.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Storage.h"
#include "StgRun.h"
#include "StgStartup.h"
#include "GC.h"
#include "Hooks.h"
#include "Schedule.h"
#include "StgMiscClosures.h"
#include "Storage.h"
#include "Evaluator.h"
#include "Printer.h"
#include "Main.h"
#include "Signals.h"
#include "Profiling.h"
#include "Sanity.h"

/* windows.h is needed on Win32 builds; guarded so other platforms
 * compile cleanly.
 */
#ifdef HAVE_WINDOWS_H
#include <windows.h>
#endif

/* Test whether p points into the statically-allocated table of
 * CHARLIKE closures (one per character code 0..255) or INTLIKE
 * closures (a small table of common Ints).  Such closures live in
 * static data, not the heap, and must not be collected or updated.
 */
#define IS_CHARLIKE_CLOSURE(p)  ( stgCast(StgPtr,p) >= stgCast(StgPtr,CHARLIKE_closure) && stgCast(char*,p) <= (stgCast(char*,CHARLIKE_closure) + 255 * sizeof(StgIntCharlikeClosure)))
#define IS_INTLIKE_CLOSURE(p)  ( stgCast(StgPtr,p) >= stgCast(StgPtr,INTLIKE_closure) && stgCast(char*,p) <= (stgCast(char*,INTLIKE_closure) + 32 * sizeof(StgIntCharlikeClosure)))

36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
StgTSO *run_queue_hd, *run_queue_tl;
StgTSO *blocked_queue_hd, *blocked_queue_tl;
StgTSO *ccalling_threads;

#define MAX_SCHEDULE_NESTING 256
nat next_main_thread;
StgTSO *main_threads[MAX_SCHEDULE_NESTING];

static void GetRoots(void);
static StgTSO *threadStackOverflow(StgTSO *tso);

/* flag set by signal handler to precipitate a context switch */
nat context_switch;
/* if this flag is set as well, give up execution */
static nat interrupted;

/* Next thread ID to allocate */
StgThreadID next_thread_id = 1;

/*
 * Pointers to the state of the current thread.
 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
 * thread.  If CurrentTSO == NULL, then we're at the scheduler level.
 */
StgTSO      *CurrentTSO;
StgRegTable  MainRegTable;

/*
 * The thread state for the main thread.
 */
StgTSO   *MainTSO;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the realworld token for an IO thread)
 *  + 1                       (the closure to enter)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.  
 */

#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)

/* -----------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in Schedule.h) are
   convenient packaged versions of this function.
   -------------------------------------------------------------------------- */

StgTSO *
createThread(nat stack_size)
{
  StgTSO *tso;

  /* catch ridiculously small stack sizes: stack_size is in words and
   * includes the TSO struct itself, so enforce the minimum on top of
   * TSO_STRUCT_SIZEW.
   */
  if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
    stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
  }

  /* One heap allocation covers both the TSO header and its stack. */
  tso = (StgTSO *)allocate(stack_size);
  TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
  
  /* initThread takes the usable stack size, i.e. minus the TSO header. */
  initThread(tso, stack_size - TSO_STRUCT_SIZEW);
  return tso;
}

/* Initialise a freshly-allocated TSO: set up its stack, push a stop
 * frame, and put it on the head of the run queue.  stack_size is the
 * usable stack size in words (excluding the TSO header).
 */
void
initThread(StgTSO *tso, nat stack_size)
{
  SET_INFO(tso,&TSO_info);
  tso->whatNext     = ThreadEnterGHC;
  tso->state        = tso_state_runnable;
  tso->id           = next_thread_id++;

  /* splim leaves RESERVED_STACK_WORDS of headroom so the thread can
   * still build a stack-overflow continuation when it runs out.
   */
  tso->splim        = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
  tso->stack_size   = stack_size;
  tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize) 
                              - TSO_STRUCT_SIZEW;
  /* Stack grows downwards: sp starts one past the top of the stack area. */
  tso->sp           = (P_)&(tso->stack) + stack_size;

#ifdef PROFILING
  tso->prof.CCCS = CCS_MAIN;
#endif

  /* put a stop frame on the stack */
  tso->sp -= sizeofW(StgStopFrame);
  SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
  /* su points at the outermost (stop) frame of the update-frame chain. */
  tso->su = (StgUpdateFrame*)tso->sp;

  IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n", 
			   tso->id, tso->stack_size));

  /* Put the new thread on the head of the runnable queue.
   * The caller of createThread better push an appropriate closure
   * on this thread's stack before the scheduler is invoked.
   */
  tso->link = run_queue_hd;
  run_queue_hd = tso;
  if (run_queue_tl == END_TSO_QUEUE) {
    run_queue_tl = tso;
  }

  IF_DEBUG(scheduler,printTSO(tso));
}

/* -----------------------------------------------------------------------------
   Delete a thread - reverting all blackholes to (something
   equivalent to) their former state.

   We create an AP_UPD for every UpdateFrame on the stack.
   Entering one of these AP_UPDs pushes everything from the corresponding
   update frame upwards onto the stack.  (Actually, it pushes everything
   up to the next update frame plus a pointer to the next AP_UPD
   object.  Entering the next AP_UPD object pushes more onto the
   stack until we reach the last AP_UPD object - at which point
   the stack should look exactly as it did when we killed the TSO
   and we can continue execution by entering the closure on top of
   the stack.   
   -------------------------------------------------------------------------- */

/* Kill a thread, reverting all the blackholes it owns: walk the
 * update-frame chain from the innermost frame outwards, freezing each
 * stack chunk into an AP_UPD (or PAP for catch/seq frames) and using
 * it to update the corresponding blackholed updatee.  See the comment
 * block above for the full story.
 */
void deleteThread(StgTSO *tso)
{
    StgUpdateFrame* su = tso->su;
    StgPtr          sp = tso->sp;

    /* Thread already dead? */
    if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
      return;
    }

    IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));

    tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
    tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */

    /* Threads that finish normally leave Su pointing to the word
     * beyond the top of the stack, and Sp pointing to the last word
     * on the stack, which is the return value of the thread.
     */
    if ((P_)tso->su >= tso->stack + tso->stack_size
	|| get_itbl(tso->su)->type == STOP_FRAME) {
      return;
    }
      
    IF_DEBUG(scheduler,
             fprintf(stderr, "Freezing TSO stack\n");
             printTSO(tso);
             );

    /* The stack freezing code assumes there's a closure pointer on
     * the top of the stack.  This isn't always the case with compiled
     * code, so we have to push a dummy closure on the top which just
     * returns to the next return address on the stack.
     */
    if ( LOOKS_LIKE_GHC_INFO(*sp) ) {
      *(--sp) = (W_)&dummy_ret_closure;
    }

    /* Loop invariant: sp points at a closure, su at the next frame
     * below it.  Each iteration freezes the chunk between them and
     * pops one frame; STOP_FRAME terminates the loop.
     */
    while (1) {
      /* words = size of the stack chunk between sp and su, excluding
       * the closure at sp itself (which becomes the AP's fun field).
       */
      int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;
      nat i;
      StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
      TICK_ALLOC_THK(words+1,0);

      /* First build an AP_UPD consisting of the stack chunk above the
       * current update frame, with the top word on the stack as the
       * fun field.
       */
      ASSERT(words >= 0);

      /*      if (words == 0) {  -- optimisation
	ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
      } else */ {
	ap->n_args = words;
	ap->fun    = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
	for(i=0; i < (nat)words; ++i) {
	  payloadWord(ap,i) = *sp++;
	}
      }

      switch (get_itbl(su)->type) {
	
      case UPDATE_FRAME:
	{
	  SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */); 
	  
	  IF_DEBUG(scheduler,
		   fprintf(stderr,  "Updating ");
		   printPtr(stgCast(StgPtr,su->updatee)); 
		   fprintf(stderr,  " with ");
		   printObj(stgCast(StgClosure*,ap));
		   );

	  /* Replace the updatee with an indirection - happily
	   * this will also wake up any threads currently
	   * waiting on the result.
	   */
	  UPD_IND(su->updatee,ap);  /* revert the black hole */
	  su = su->link;
	  sp += sizeofW(StgUpdateFrame) -1;
	  sp[0] = stgCast(StgWord,ap); /* push onto stack */
	  break;
	}
      
      case CATCH_FRAME:
	{
	  StgCatchFrame *cf = (StgCatchFrame *)su;
	  StgClosure* o;
	    
	  /* We want a PAP, not an AP_UPD.  Fortunately, the
	   * layout's the same.
	   */
	  SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
	  
	  /* now build o = FUN(catch,ap,handler) */
	  o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
	  TICK_ALLOC_THK(2,0);
	  SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
	  payloadCPtr(o,0) = stgCast(StgClosure*,ap);
	  payloadCPtr(o,1) = cf->handler;
	  
	  IF_DEBUG(scheduler,
		   fprintf(stderr,  "Built ");
		   printObj(stgCast(StgClosure*,o));
		   );
	  
	  /* pop the old handler and put o on the stack */
	  su = cf->link;
	  sp += sizeofW(StgCatchFrame) - 1;
	  sp[0] = (W_)o;
	  break;
	}
	
      case SEQ_FRAME:
	{
	  StgSeqFrame *sf = (StgSeqFrame *)su;
	  StgClosure* o;
	  
	  SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
	  
	  /* now build o = FUN(seq,ap) */
          o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
	  TICK_ALLOC_THK(1,0);
	  SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
	  payloadCPtr(o,0) = stgCast(StgClosure*,ap);
	  
	  IF_DEBUG(scheduler,
		   fprintf(stderr,  "Built ");
		   printObj(stgCast(StgClosure*,o));
		   );
	    
	  /* pop the old handler and put o on the stack */
	  su = sf->link;
	  sp += sizeofW(StgSeqFrame) - 1;
	  sp[0] = (W_)o;
	  break;
	}
      
      case STOP_FRAME:
	/* Reached the outermost frame: the whole stack is frozen. */
	return;
	
      default:
	barf("freezeTSO");
      }
    }
}

/* One-time scheduler initialisation: empty all thread queues and
 * clear the global scheduler flags before any thread is created.
 */
void initScheduler(void)
{
  /* No threads exist yet, so every queue starts out empty. */
  run_queue_hd     = run_queue_tl     = END_TSO_QUEUE;
  blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
  ccalling_threads = END_TSO_QUEUE;

  /* No nested scheduler invocations are in progress. */
  next_main_thread = 0;

  /* Run normally: no context switch pending, not interrupted. */
  context_switch = 0;
  interrupted    = 0;

  /* No CAFs have been entered yet. */
  enteredCAFs = END_CAF_LIST;
}

/* -----------------------------------------------------------------------------
   Main scheduling loop.

   We use round-robin scheduling, each thread returning to the
   scheduler loop when one of these conditions is detected:

      * stack overflow
      * out of heap space
      * timer expires (thread yields)
      * thread blocks
      * thread ends
   -------------------------------------------------------------------------- */

/* Run threads round-robin until the given main thread completes or is
 * killed.  `main` must already have a closure pushed on its stack;
 * `*ret_val` receives the main thread's result on Success (NULL
 * otherwise).  Re-entrant: a _ccall_gc from Haskell may call back in,
 * in which case the calling thread is parked on ccalling_threads.
 *
 * BUG FIX: when `interrupted` is set, the old code drained the run
 * queue with `run_queue_hd = t->link` where t was the already-deleted
 * current thread (whose link deleteThread() had just stubbed to
 * END_TSO_QUEUE), so the queued threads were never deleted and their
 * blackholes never reverted.  We now pop each thread off the head of
 * the queue before deleting it.
 */
SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
{
  StgTSO *t;
  StgThreadReturnCode ret;
  StgTSO **MainTSO;
  rtsBool in_ccall_gc;

  /* Return value is NULL by default, it is only filled in if the
   * main thread completes successfully.
   */
  if (ret_val) { *ret_val = NULL; }

  /* Save away a pointer to the main thread so that we can keep track
   * of it should a garbage collection happen.  We keep a stack of
   * main threads in order to support scheduler re-entry.  We can't
   * use the normal TSO linkage for this stack, because the main TSO
   * may need to be linked onto other queues.
   */
  main_threads[next_main_thread] = main;
  MainTSO = &main_threads[next_main_thread];
  next_main_thread++;
  IF_DEBUG(scheduler,
	   fprintf(stderr, "Scheduler entered: nesting = %d\n", 
		   next_main_thread););

  /* Are we being re-entered? 
   */
  if (CurrentTSO != NULL) {
    /* This happens when a _ccall_gc from Haskell ends up re-entering
     * the scheduler.
     *
     * Block the current thread (put it on the ccalling_queue) and
     * continue executing.  The calling thread better have stashed
     * away its state properly and left its stack with a proper stack
     * frame on the top.
     */
    threadPaused(CurrentTSO);
    CurrentTSO->link = ccalling_threads;
    ccalling_threads = CurrentTSO;
    in_ccall_gc = rtsTrue;
    IF_DEBUG(scheduler,
	     fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n", 
		     CurrentTSO->id););
  } else {
    in_ccall_gc = rtsFalse;
  }

  /* Take a thread from the run queue.
   */
  t = run_queue_hd;
  if (t != END_TSO_QUEUE) {
    run_queue_hd = t->link;
    t->link = END_TSO_QUEUE;
    if (run_queue_hd == END_TSO_QUEUE) {
      run_queue_tl = END_TSO_QUEUE;
    }
  }

  while (t != END_TSO_QUEUE) {
    CurrentTSO = t;

    /* If we have more threads on the run queue, set up a context
     * switch at some point in the future.
     */
    if (run_queue_hd != END_TSO_QUEUE) {
      context_switch = 1;
    } else {
      context_switch = 0;
    }
    IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));

    /* Be friendly to the storage manager: we're about to *run* this
     * thread, so we better make sure the TSO is mutable.
     */
    if (t->mut_link == NULL) {
      recordMutable((StgMutClosure *)t);
    }

    /* Run the current thread */
    switch (t->whatNext) {
    case ThreadKilled:
    case ThreadComplete:
      /* thread already killed.  Drop it and carry on. */
      goto next_thread;
    case ThreadEnterGHC:
      ret = StgRun((StgFunPtr) stg_enterStackTop);
      break;
    case ThreadRunGHC:
      ret = StgRun((StgFunPtr) stg_returnToStackTop);
      break;
    case ThreadEnterHugs:
#ifdef INTERPRETER
      {  
	  IF_DEBUG(scheduler,belch("entering Hugs"));	  
	  LoadThreadState();
	  /* CHECK_SENSIBLE_REGS(); */
	  {
	      StgClosure* c = stgCast(StgClosure*,*Sp);
	      Sp += 1;
	      ret = enter(c);
	  }	
	  SaveThreadState();
	  break;
      }
#else
      barf("Panic: entered a BCO but no bytecode interpreter in this build");
#endif
    default:
      barf("schedule: invalid whatNext field");
    }

    /* We may have garbage collected while running the thread
     * (eg. something nefarious like _ccall_GC_ performGC), and hence
     * CurrentTSO may have moved.  Update t to reflect this.
     */
    t = CurrentTSO;
    CurrentTSO = NULL;

    /* Costs for the scheduler are assigned to CCS_SYSTEM */
#ifdef PROFILING
    CCCS = CCS_SYSTEM;
#endif

    switch (ret) {

    case HeapOverflow:
      IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
      threadPaused(t);
      PUSH_ON_RUN_QUEUE(t);
      GarbageCollect(GetRoots);
      break;

    case StackOverflow:
      IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
      { 
	nat i;
	/* enlarge the stack */
	StgTSO *new_t = threadStackOverflow(t);
	
	/* This TSO has moved, so update any pointers to it from the
	 * main thread stack.  It better not be on any other queues...
	 * (it shouldn't be)
	 */
	for (i = 0; i < next_main_thread; i++) {
	  if (main_threads[i] == t) {
	    main_threads[i] = new_t;
	  }
	}
	t = new_t;
      }
      PUSH_ON_RUN_QUEUE(t);
      break;

    case ThreadYielding:
      IF_DEBUG(scheduler,
               if (t->whatNext == ThreadEnterHugs) {
		   /* ToDo: or maybe a timer expired when we were in Hugs?
		    * or maybe someone hit ctrl-C
                    */
                   belch("Thread %ld stopped to switch to Hugs\n", t->id);
               } else {
                   belch("Thread %ld stopped, timer expired\n", t->id);
               }
               );
      threadPaused(t);
      if (interrupted) {
          IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
	  deleteThread(t);
	  /* Delete every remaining runnable thread, popping each one
	   * off the head of the queue first (the previous code read
	   * t->link after deleteThread had stubbed it, so the queue
	   * was never actually drained).
	   */
	  while (run_queue_hd != END_TSO_QUEUE) {
	      t = run_queue_hd;
	      run_queue_hd = t->link;
	      deleteThread(t);
	  }
	  run_queue_tl = END_TSO_QUEUE;
	  /* ToDo: should I do the same with blocked queues? */
          return Interrupted;
      }

      /* Put the thread back on the run queue, at the end.
       * t->link is already set to END_TSO_QUEUE.
       */
      ASSERT(t->link == END_TSO_QUEUE);
      if (run_queue_tl == END_TSO_QUEUE) {
        run_queue_hd = run_queue_tl = t;
      } else {
        ASSERT(get_itbl(run_queue_tl)->type == TSO);
	if (run_queue_hd == run_queue_tl) {
	  run_queue_hd->link = t;
	  run_queue_tl = t;
	} else {
	  run_queue_tl->link = t;
	  run_queue_tl = t;
	}
      }
      break;

    case ThreadBlocked:
      IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
      threadPaused(t);
      /* assume the thread has put itself on some blocked queue
       * somewhere.
       */
      break;

    case ThreadFinished:
      IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
      deleteThread(t);
      t->whatNext = ThreadComplete;
      break;

    default:
      barf("schedule: invalid thread return code");
    }

    /* check for signals each time around the scheduler */
#ifndef __MINGW32__
    if (signals_pending()) {
      start_signal_handlers();
    }
#endif

    /* If our main thread has finished or been killed, return.
     * If we were re-entered as a result of a _ccall_gc, then
     * pop the blocked thread off the ccalling_threads stack back
     * into CurrentTSO.
     */
    if ((*MainTSO)->whatNext == ThreadComplete
	|| (*MainTSO)->whatNext == ThreadKilled) {
      next_main_thread--;
      if (in_ccall_gc) {
	CurrentTSO = ccalling_threads;
	ccalling_threads = ccalling_threads->link;
	/* remember to stub the link field of CurrentTSO */
	CurrentTSO->link = END_TSO_QUEUE;
      }
      if ((*MainTSO)->whatNext == ThreadComplete) {
	/* we finished successfully, fill in the return value */
	if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
	return Success;
      } else {
	return Killed;
      }
    }

  next_thread:
    t = run_queue_hd;
    if (t != END_TSO_QUEUE) {
      run_queue_hd = t->link;
      t->link = END_TSO_QUEUE;
      if (run_queue_hd == END_TSO_QUEUE) {
	run_queue_tl = END_TSO_QUEUE;
      }
    }
  }

  /* Run queue empty but main thread not finished: either everyone is
   * blocked, or nothing can ever make progress again.
   */
  if (blocked_queue_hd != END_TSO_QUEUE) {
    return AllBlocked;
  } else {
    return Deadlock;
  }
}

/* -----------------------------------------------------------------------------
   Where are the roots that we know about?

        - all the threads on the runnable queue
        - all the threads on the blocked queue
	- all the thread currently executing a _ccall_GC
        - all the "main threads"
     
   -------------------------------------------------------------------------- */

/* GC root-marking callback: mark every thread the scheduler knows
 * about (run queue, blocked queue, ccalling threads, and the stack of
 * main threads), updating each pointer to the thread's new location.
 */
static void GetRoots(void)
{
  nat i;

  run_queue_hd      = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
  run_queue_tl      = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);

  blocked_queue_hd  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
  blocked_queue_tl  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);

  ccalling_threads  = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);

  for (i = 0; i < next_main_thread; i++) {
    main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
  }
}

/* -----------------------------------------------------------------------------
   performGC

   This is the interface to the garbage collector from Haskell land.
   We provide this so that external C code can allocate and garbage
   collect when called from Haskell via _ccall_GC.

   It might be useful to provide an interface whereby the programmer
   can specify more roots (ToDo).
   -------------------------------------------------------------------------- */

/* Extra user-supplied root-marking function; stashed here so that
 * AllRoots (a plain function pointer passed to GarbageCollect) can
 * reach it.
 */
void (*extra_roots)(void);

/* Trigger a garbage collection from C land (via _ccall_GC), using
 * only the scheduler's own roots.
 */
void
performGC(void)
{
  GarbageCollect(GetRoots);
}

/* Combined root-marking function: the scheduler's roots plus the
 * user-registered extra_roots.  Used by performGCWithRoots().
 */
static void
AllRoots(void)
{
  GetRoots();			/* the scheduler's roots */
  extra_roots();		/* the user's roots */
}

/* Trigger a garbage collection, additionally marking the roots
 * supplied by get_roots.  NOTE(review): not reentrant - the callback
 * is passed through the file-scope extra_roots variable.
 */
void
performGCWithRoots(void (*get_roots)(void))
{
  extra_roots = get_roots;

  GarbageCollect(AllRoots);
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size,
   then bomb out.  Otherwise relocate the TSO into a larger chunk of
   memory and adjust its stack size appropriately.
   -------------------------------------------------------------------------- */

/* Grow a thread's stack after overflow: allocate a new, larger TSO,
 * copy the old TSO header and live stack into it, relocate the stack
 * and update-frame pointers, and return the new TSO.  If the thread
 * is already at its maximum stack size, bomb out via stackOverflow()
 * (which does not return).
 */
static StgTSO *
threadStackOverflow(StgTSO *tso)
{
  nat new_stack_size, new_tso_size, diff, stack_words;
  StgPtr new_sp;
  StgTSO *dest;

  if (tso->stack_size >= tso->max_stack_size) {
    /* ToDo: just kill this thread? */
#ifdef DEBUG
    /* If we're debugging, just print out the top of the stack */
    printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, 
				     tso->sp+64));
#endif
    stackOverflow(tso->max_stack_size);
  }

  /* Try to double the current stack size.  If that takes us over the
   * maximum stack size for this thread, then use the maximum instead.
   * Finally round up so the TSO ends up as a whole number of blocks.
   */
  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
  new_tso_size   = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
				       TSO_STRUCT_SIZE)/sizeof(W_);
  new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
  new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;

  IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));

  dest = (StgTSO *)allocate(new_tso_size);
  TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);

  /* copy the TSO block and the old stack into the new area.  The
   * live stack is copied to the *top* of the new stack area (stacks
   * grow downwards), so sp keeps the same distance from the top.
   */
  memcpy(dest,tso,TSO_STRUCT_SIZE);
  stack_words = tso->stack + tso->stack_size - tso->sp;
  new_sp = (P_)dest + new_tso_size - stack_words;
  memcpy(new_sp, tso->sp, stack_words * sizeof(W_));

  /* relocate the stack pointers... */
  diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
  dest->su    = (StgUpdateFrame *) ((P_)dest->su + diff);
  dest->sp    = new_sp;
  /* splim is measured from the *bottom* of the TSO, so it only shifts
   * by the TSO displacement, not by diff.
   */
  dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
  dest->stack_size = new_stack_size;
	
  /* and relocate the update frame list */
  relocate_TSO(tso, dest);

  /* Mark the old one as dead so we don't try to scavenge it during
   * garbage collection (the TSO will likely be on a mutables list in
   * some generation, but it'll get collected soon enough).
   */
  tso->whatNext = ThreadKilled;
  dest->mut_link = NULL;

  IF_DEBUG(sanity,checkTSO(tso));
#if 0
  IF_DEBUG(scheduler,printTSO(dest));
#endif
  /* NOTE(review): compares against the global MainTSO (the local in
   * schedule() shadows it) - flagged as a "hack" by the original
   * author.
   */
  if (tso == MainTSO) { /* hack */
      MainTSO = dest;
  }
  return dest;
}

/* -----------------------------------------------------------------------------
   Wake up a queue that was blocked on some resource (usually a
   computation in progress).
   -------------------------------------------------------------------------- */

/* Move every thread on the queue q onto the run queue, e.g. after the
 * computation they were blocked on has completed.
 */
void awaken_blocked_queue(StgTSO *q)
{
  StgTSO *next;

  for (; q != END_TSO_QUEUE; q = next) {
    ASSERT(get_itbl(q)->type == TSO);
    /* Grab the successor before PUSH_ON_RUN_QUEUE clobbers q->link. */
    next = q->link;
    PUSH_ON_RUN_QUEUE(q);
    IF_DEBUG(scheduler,belch("Waking up thread %ld", q->id));
  }
}

/* -----------------------------------------------------------------------------
   Interrupt execution
   - usually called inside a signal handler so it mustn't do anything fancy.   
   -------------------------------------------------------------------------- */

/* Request that the scheduler stop: sets both flags so the running
 * thread yields at the next context-switch check and schedule()
 * then returns Interrupted.  Called from signal handlers, so it must
 * only do these simple flag writes.
 */
void interruptStgRts(void)
{
    interrupted    = 1;
    context_switch = 1;
}