/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2001-2006
 *
 * Capabilities
 *
 * For details on the high-level design, see
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
 *
 * A Capability holds all the state an OS thread/task needs to run
 * Haskell code: its STG registers, a pointer to its TSO, a nursery
 * etc. During STG execution, a pointer to the Capability is kept in
 * a register (BaseReg).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * in the non-threaded RTS there is one global capability, called
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/

#ifndef CAPABILITY_H
#define CAPABILITY_H

#include "sm/GC.h" // for evac_fn
#include "Task.h"
#include "Sparks.h"

BEGIN_RTS_PRIVATE

struct Capability_ {
    // State required by the STG virtual machine when running Haskell
    // code.  During STG execution, the BaseReg register always points
    // to the StgRegTable of the current Capability (&cap->r).
    StgFunTable f;
    StgRegTable r;

    nat no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    Task *running_task;

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.
    rtsBool in_haskell;

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // also lock-free.
    StgTSO *run_queue_hd;
    StgTSO *run_queue_tl;

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **mut_lists;
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;

    // Context switch flag. We used to have one global flag, now one
    // per capability. Locks required: none (conflicts are harmless)
    int context_switch;

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.
    Task *spare_workers;

    // This lock protects running_task, returning_tasks_{hd,tl}, and inbox.
    Mutex lock;

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.
    Message *inbox;

    SparkPool *sparks;

    // Stats on spark creation/conversion
    nat sparks_created;
    nat sparks_converted;
    nat sparks_pruned;
#endif

    // Per-capability STM-related data
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // Capabilities are stored in an array, so make sure that adjacent
  // Capabilities don't share any cache-lines:
#ifndef mingw32_HOST_OS
  ATTRIBUTE_ALIGNED(64)
#endif
  ;


#if defined(THREADED_RTS)
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
#else
#define ASSERT_TASK_ID(task) /*empty*/
#endif

// These properties should be true when a Task is holding a Capability
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)			\
  ASSERT(cap->running_task != NULL && cap->running_task == task);	\
  ASSERT(task->cap == cap);						\
  ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)

// Sometimes a Task holds a Capability, but the Task is not associated
// with that Capability (i.e. task->cap != cap).  This happens when
// (a) a Task holds multiple Capabilities, and (b) the current Task is
// bound, its thread has just blocked, and it may have been moved to
// another Capability.
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)	\
  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?		\
	    cap->run_queue_tl == END_TSO_QUEUE : 1);	\
  ASSERT(myTask() == task);				\
  ASSERT_TASK_ID(task);
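
/* Usage sketch (illustrative only; exampleRunHaskell is a hypothetical
 * function, not part of the RTS): code that has just acquired a Capability
 * on behalf of a Task would typically check the full invariants before
 * touching the Capability's contents:
 *
 *     static void exampleRunHaskell (Capability *cap, Task *task)
 *     {
 *         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 *         // ... now safe to use cap->run_queue_hd, cap->r, etc. ...
 *     }
 */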

// Converts a *StgRegTable into a *Capability.
//
INLINE_HEADER Capability *
regTableToCapability (StgRegTable *reg)
{
    return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
}
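
/* Usage sketch (illustrative only; exampleFromBaseReg is hypothetical):
 * code called from STG-land is handed BaseReg, i.e. a pointer to the r
 * field of the current Capability, and can recover the Capability itself:
 *
 *     void exampleFromBaseReg (StgRegTable *reg)
 *     {
 *         Capability *cap = regTableToCapability(reg);
 *         ASSERT(reg == &cap->r);   // reg was &cap->r to begin with
 *     }
 */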

// Initialise the available capabilities.
//
void initCapabilities (void);

// Release a capability.  This is called by a Task that is exiting
// Haskell to make a foreign call, or in various other cases when we
// want to relinquish a Capability that we currently hold.
//
// ASSUMES: cap->running_task is the current Task.
//
#if defined(THREADED_RTS)
void releaseCapability           (Capability* cap);
void releaseAndWakeupCapability  (Capability* cap);
void releaseCapability_ (Capability* cap, rtsBool always_wakeup); 
// assumes cap->lock is held
#else
// releaseCapability() is empty in non-threaded RTS
INLINE_HEADER void releaseCapability  (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseAndWakeupCapability  (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED, 
                                       rtsBool always_wakeup STG_UNUSED) {};
#endif
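
/* Usage sketch (illustrative only, THREADED_RTS; simplified from the real
 * call-out path): a Task that holds cap and is about to block outside
 * Haskell can relinquish it, waking a waiting worker, and re-acquire a
 * Capability afterwards with waitForReturnCapability() (declared below):
 *
 *     // ASSUMES: cap->running_task == task (the current Task)
 *     releaseAndWakeupCapability(cap);
 *     // ... block without holding any Capability ...
 *     waitForReturnCapability(&cap, task);
 */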

// declared in includes/rts/Threads.h:
// extern Capability MainCapability; 

// declared in includes/rts/Threads.h:
// extern nat n_capabilities;

// Array of all the capabilities
//
extern Capability *capabilities;

// The Capability that was last free.  Used as a good guess for where
// to assign new threads.
//
extern Capability *last_free_capability;

// GC indicator, in scope for the scheduler
#define PENDING_GC_SEQ 1
#define PENDING_GC_PAR 2
extern volatile StgWord waiting_for_gc;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);
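
/* Usage sketch (illustrative only; simplified): a Task returning from a
 * safe foreign call re-enters Haskell by re-acquiring a Capability,
 * preferably the one it last held:
 *
 *     Capability *cap = task->cap;          // preference; may be NULL
 *     waitForReturnCapability(&cap, task);
 *     // cap is now non-NULL and held exclusively by this Task
 */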

EXTERN_INLINE void recordMutableCap (StgClosure *p, Capability *cap, nat gen);

EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);

#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
void yieldCapability (Capability** pCap, Task *task);
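
/* Usage sketch (illustrative only; simplified scheduler fragment): offer the
 * Capability to any higher-priority waiter and notice whether it was taken:
 *
 *     yieldCapability(&cap, task);
 *     if (cap == NULL) {
 *         // the Capability was given away; re-acquire one as described
 *         // above before running any more Haskell code
 *     }
 */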

// Acquires a capability for doing some work.
//
// On return: pCap points to the capability.
//
void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
//
void prodOneCapability (void);
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
//
void prodAllCapabilities (void);

// Waits for a capability to drain of runnable threads and workers,
// and then acquires it.  Used at shutdown time.
//
void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);
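
/* Usage sketch (illustrative only; a simplified version of what RTS shutdown
 * might do, not the exact sequence; task is assumed to be the shutting-down
 * Task):
 *
 *     nat i;
 *     for (i = 0; i < n_capabilities; i++) {
 *         shutdownCapability(&capabilities[i], task, rtsFalse);
 *     }
 */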

// Attempt to gain control of a Capability if it is free.
//
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
//
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
//
rtsBool anySparks (void);

INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
INLINE_HEADER nat     sparkPoolSizeCap  (Capability *cap);
INLINE_HEADER void    discardSparksCap  (Capability *cap);
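
/* Usage sketch (illustrative only; simplified scheduler fragment): when a
 * Capability runs out of runnable threads it can look for a spark to
 * evaluate:
 *
 *     if (cap->run_queue_hd == END_TSO_QUEUE) {
 *         StgClosure *spark = findSpark(cap);  // may also steal elsewhere
 *         if (spark != NULL) {
 *             // turn the spark into a thread and put it on the run queue
 *         }
 *     }
 */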

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
//
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */

// cause all capabilities to context switch as soon as possible.
void setContextSwitches(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);

// For the GC:
void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta, 
                           rtsBool no_mark_sparks);
void markCapabilities (evac_fn evac, void *user);
void traverseSparkQueues (evac_fn evac, void *user);

/* -----------------------------------------------------------------------------
   Messages
   -------------------------------------------------------------------------- */

#ifdef THREADED_RTS

INLINE_HEADER rtsBool emptyInbox(Capability *cap);

#endif // THREADED_RTS

/* -----------------------------------------------------------------------------
 * INLINE functions... private below here
 * -------------------------------------------------------------------------- */

EXTERN_INLINE void
recordMutableCap (StgClosure *p, Capability *cap, nat gen)
{
    bdescr *bd;

    // We must own this Capability in order to modify its mutable list.
    //    ASSERT(cap->running_task == myTask());
    // NO: assertion is violated by performPendingThrowTos()
    bd = cap->mut_lists[gen];
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
	bdescr *new_bd;
	new_bd = allocBlock_lock();
	new_bd->link = bd;
	bd = new_bd;
	cap->mut_lists[gen] = bd;
    }
    *bd->free++ = (StgWord)p;
}

EXTERN_INLINE void
recordClosureMutated (Capability *cap, StgClosure *p)
{
    bdescr *bd;
    bd = Bdescr((StgPtr)p);
    if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
}
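
/* Usage sketch (illustrative only; mv and new_value are hypothetical): a
 * mutator-side write barrier is "do the write, then record the closure on
 * this Capability's mutable list" (a no-op for generation-0 closures):
 *
 *     ((StgMutVar *)mv)->var = new_value;
 *     recordClosureMutated(cap, (StgClosure *)mv);
 */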


#if defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptySparkPoolCap (Capability *cap) 
{ return looksEmpty(cap->sparks); }

INLINE_HEADER nat
sparkPoolSizeCap (Capability *cap) 
{ return sparkPoolSize(cap->sparks); }

INLINE_HEADER void
discardSparksCap (Capability *cap) 
{ return discardSparks(cap->sparks); }
#endif

INLINE_HEADER void
contextSwitchCapability (Capability *cap)
{
    // setting HpLim to NULL ensures that the next heap check will
    // fail, and the thread will return to the scheduler.
    cap->r.rHpLim = NULL;
    // But just in case it didn't work (the target thread might be
    // modifying HpLim at the same time), we set the end-of-block
    // context-switch flag too:
    cap->context_switch = 1;
}
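
/* Usage sketch (illustrative only): setContextSwitches() above can be
 * thought of as roughly the following loop over all Capabilities:
 *
 *     nat i;
 *     for (i = 0; i < n_capabilities; i++) {
 *         contextSwitchCapability(&capabilities[i]);
 *     }
 */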

#ifdef THREADED_RTS

INLINE_HEADER rtsBool emptyInbox(Capability *cap)
{
    return (cap->inbox == (Message*)END_TSO_QUEUE);
}
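
/* Usage sketch (illustrative only; simplified): the owning Task can poll the
 * inbox without taking cap->lock, and only lock when there is something to
 * process:
 *
 *     if (!emptyInbox(cap)) {
 *         // take cap->lock, detach cap->inbox, then execute each Message
 *     }
 */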

#endif

END_RTS_PRIVATE

#endif /* CAPABILITY_H */