/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING
        ;
}
#endif

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
anyWorkForMe( Capability *cap, Task *task )
{
    if (task->tso != NULL) {
        // A bound task only runs if its thread is on the run queue of
        // the capability on which it was woken up.  Otherwise, we
        // can't be sure that we have the right capability: the thread
        // might be woken up on some other capability, and task->cap
        // could change under our feet.
        return !emptyRunQueue(cap) && cap->run_queue_hd->bound == task;
    } else {
        // A vanilla worker task runs if either there is a lightweight
        // thread at the head of the run queue, or the run queue is
        // empty and (there are sparks to execute, or there is some
        // other global condition to check, such as threads blocked on
        // blackholes).
        if (emptyRunQueue(cap)) {
            return !emptySparkPoolCap(cap)
                || !emptyWakeupQueue(cap)
                || globalWorkToDo();
        } else
            return cap->run_queue_hd->bound == NULL;
    }
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */
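/* The returning_tasks list is a FIFO threaded through task->return_link:
 * newReturningTask() appends at returning_tasks_tl and popReturningTask()
 * removes from returning_tasks_hd, so returning in-calls are serviced in
 * arrival order. */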

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
	ASSERT(cap->returning_tasks_tl->return_link == NULL);
	cap->returning_tasks_tl->return_link = task;
    } else {
	cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
	cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell        = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
#endif

    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;

    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
	errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
	RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
	capabilities = &MainCapability;
	// THREADED_RTS must work on builds that don't have a mutable
	// BaseReg (eg. unregisterised), so in this case
	// capabilities[0] must coincide with &MainCapability.
    } else {
	capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
				      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
  nat i;
  for (i=0; i < n_capabilities; i++) {
    capabilities[i].context_switch = 1;
  }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrating thread must take task->lock
 * before modifying task->cap, to synchronise with the waking Task.
 * Additionally, the migrating thread should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
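// Note that giveCapabilityToTask() only signals the target Task; it does not
// set cap->running_task.  The woken Task claims the Capability itself, under
// cap->lock, in waitForReturnCapability() or yieldCapability().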

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    /* If waiting_for_gc was the reason to release the cap: the thread
       comes from yieldCapability->releaseCapabilityAndQueueWorker.
       Unconditionally set the cap free and return (see the default case
       after the other, if-protected, special cases).  The thread will
       wait on its condition variable and re-acquire the same cap after
       GC (the GC-triggering cap calls releaseCapability and enters the
       spare_workers case).
    */
    if (waiting_for_gc) {
      last_free_capability = cap; // needed?
      trace(TRACE_sched | DEBUG_sched,
            "GC pending, set capability %d free", cap->no);
      return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (!emptyRunQueue(cap) || !emptyWakeupQueue(cap)
              || !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }

    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
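// Wake-up priority in releaseCapability_(): returning in-calls first; if a GC
// is pending the Capability is simply marked free; otherwise a bound thread at
// the head of the run queue, then a spare (or freshly started) worker if there
// is any other work to do; failing all of that, the Capability is marked free.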

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
	task->next = cap->spare_workers;
	cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Capability **pCap, Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
	// It's free; just grab it
	cap->running_task = task;
	RELEASE_LOCK(&cap->lock);
    } else {
	newReturningTask(cap,task);
	RELEASE_LOCK(&cap->lock);

	for (;;) {
	    ACQUIRE_LOCK(&task->lock);
	    // task->lock held, cap->lock not held
	    if (!task->wakeup) waitCondition(&task->cond, &task->lock);
	    cap = task->cap;
	    task->wakeup = rtsFalse;
	    RELEASE_LOCK(&task->lock);

	    // now check whether we should wake up...
	    ACQUIRE_LOCK(&cap->lock);
	    if (cap->running_task == NULL) {
		if (cap->returning_tasks_hd != task) {
		    giveCapabilityToTask(cap,cap->returning_tasks_hd);
		    RELEASE_LOCK(&cap->lock);
		    continue;
		}
		cap->running_task = task;
		popReturningTask(cap);
		RELEASE_LOCK(&cap->lock);
		break;
	    }
	    RELEASE_LOCK(&cap->lock);
	}

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
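/* A returning in-call therefore either grabs a free Capability directly, or
 * queues itself on cap->returning_tasks and sleeps on its condition variable
 * until releaseCapability_() hands it the Capability. */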

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    // Fast path: no locking is needed if we don't enter the while loop below.

    while ( waiting_for_gc
	    /* i.e. another capability triggered HeapOverflow, is busy
	       getting capabilities (stopping their owning tasks) */
	    || cap->returning_tasks_hd != NULL 
	        /* cap reserved for another task */
	    || !anyWorkForMe(cap,task) 
	        /* cap/task have no work */
	    ) {
        debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

        // We must now release the capability and wait to be woken up
        // again.
        task->wakeup = rtsFalse;
	releaseCapabilityAndQueueWorker(cap);

	for (;;) {
	    ACQUIRE_LOCK(&task->lock);
	    // task->lock held, cap->lock not held
	    if (!task->wakeup) waitCondition(&task->cond, &task->lock);
	    cap = task->cap;
	    task->wakeup = rtsFalse;
	    RELEASE_LOCK(&task->lock);

            debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

	    ACQUIRE_LOCK(&cap->lock);
	    if (cap->running_task != NULL) {
                debugTrace(DEBUG_sched,
                           "capability %d is owned by another task", cap->no);
		RELEASE_LOCK(&cap->lock);
		continue;
	    }

	    if (task->tso == NULL) {
		ASSERT(cap->spare_workers != NULL);
		// if we're not at the front of the queue, release it
		// again.  This is unlikely to happen.
		if (cap->spare_workers != task) {
		    giveCapabilityToTask(cap,cap->spare_workers);
		    RELEASE_LOCK(&cap->lock);
		    continue;
		}
		cap->spare_workers = task->next;
		task->next = NULL;
	    }
	    cap->running_task = task;
	    RELEASE_LOCK(&cap->lock);
	    break;
	}

        trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
        ASSERT(cap->running_task == task);
    }

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
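/* On return from yieldCapability(), the Task owns *pCap again, but it may be a
 * different Capability from the one it originally held: while the Task was
 * asleep, task->cap may have been changed by a migrating thread (see the
 * comment above giveCapabilityToTask()). */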

/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */

void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // other_cap->lock is held at this point (acquired above), so it is safe
    // to retarget tso->cap (and tso->bound->cap) to other_cap.
    if (tso->bound) {
	ASSERT(tso->bound->cap == tso->cap);
    	tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

	appendToRunQueue(other_cap,tso);

	trace(TRACE_sched, "resuming capability %d", other_cap->no);
	releaseCapability_(other_cap);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}

/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */

static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i=0; i < n_capabilities; i++) {
	cap = &capabilities[i];
	ACQUIRE_LOCK(&cap->lock);
	if (!cap->running_task) {
	    if (cap->spare_workers) {
		trace(TRACE_sched, "resuming capability %d", cap->no);
		task = cap->spare_workers;
		ASSERT(!task->stopped);
		giveCapabilityToTask(cap,task);
		if (!all) {
		    RELEASE_LOCK(&cap->lock);
		    return;
		}
	    }
        }
        RELEASE_LOCK(&cap->lock);
    }
    return;
}

void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}

/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    ASSERT(sched_state == SCHED_SHUTTING_DOWN);

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
	ACQUIRE_LOCK(&cap->lock);
	if (cap->running_task) {
	    RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched, 
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    // unlink the dead worker from the spare_workers list
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    // this worker is still alive; it becomes the new predecessor
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
	}

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
	    debugTrace(DEBUG_sched, 
		       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
	    RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
            
	debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        freeCapability(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }

    // We now have the Capability; its run queue and spare_workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
	RELEASE_LOCK(&cap->lock);
	return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}
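/* The unlocked check of cap->running_task at the top of tryGrabCapability() is
 * only an optimisation to avoid taking cap->lock when the Capability is
 * obviously busy; the decision is made by the second check, performed while
 * holding the lock. */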


#endif /* THREADED_RTS */

void
freeCapability (Capability *cap) {
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(&cap->r.rSparks);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, GC thread i (of n threads) marks all
   Capabilities c for which (c `mod` n == i).
   ------------------------------------------------------------------------ */

void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
	cap = &capabilities[i];
	evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
	evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
	evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
	evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
	for (task = cap->suspended_ccalling_tasks; task != NULL; 
	     task=task->next) {
	    debugTrace(DEBUG_sched,
		       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
	    evac(user, (StgClosure **)(void *)&task->suspended_tso);
	}

#if defined(THREADED_RTS)
        traverseSparkQueue (evac, user, cap);
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif 
}

// This function is used by the compacting GC to thread all the
// pointers from spark queues.
void
traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        traverseSparkQueue (evac, user, &capabilities[i]);
    }
#endif // THREADED_RTS

}

void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1);
}
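
/* Usage sketch (illustrative): a GC thread with index t out of n GC threads
 * would call markSomeCapabilities(evac, user, t, n) so that each Capability is
 * traversed by exactly one thread; markCapabilities() is the single-threaded
 * special case (i0 = 0, delta = 1). */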