/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2001-2006
 *
 * Capabilities
 *
 * For details on the high-level design, see
 *   http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
 *
 * A Capability holds all the state an OS thread/task needs to run
 * Haskell code: its STG registers, a pointer to its TSO, a nursery,
 * etc. During STG execution, a pointer to the Capability is kept in
 * a register (BaseReg).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * in the non-threaded RTS there is one global capability, called
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/

#ifndef CAPABILITY_H
#define CAPABILITY_H

#include "sm/GC.h" // for evac_fn
#include "Task.h"
#include "Sparks.h"

#include "BeginPrivate.h"

struct Capability_ {
    // State required by the STG virtual machine when running Haskell
    // code.  During STG execution, the BaseReg register always points
    // to the StgRegTable of the current Capability (&cap->r).
    StgFunTable f;
    StgRegTable r;

    nat no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    Task *running_task;

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.
    rtsBool in_haskell;

    // Has there been any activity on this Capability since the last GC?
    nat idle;

    rtsBool disabled;

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // also lock-free.
    StgTSO *run_queue_hd;
    StgTSO *run_queue_tl;

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **mut_lists;
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;
    // full pinned object blocks allocated since the last GC
    bdescr *pinned_object_blocks;

    // per-capability weak pointer list associated with nursery (older
    // lists stored in generation object)
    StgWeak *weak_ptr_list_hd;
    StgWeak *weak_ptr_list_tl;

    // Context switch flag.  When non-zero, this means: stop running
    // Haskell code, and switch threads.
    int context_switch;

    // Interrupt flag.  Like the context_switch flag, this also
    // indicates that we should stop running Haskell code, but we do
    // *not* switch threads.  This is used to stop a Capability in
    // order to do GC, for example.
    //
    // The interrupt flag is always reset before we start running
    // Haskell code, unlike the context_switch flag which is only
    // reset after we have executed the context switch.
    int interrupt;

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.
    Task *spare_workers;
    nat n_spare_workers; // count of above

    // This lock protects:
    //    running_task
    //    returning_tasks_{hd,tl}
    //    wakeup_queue
    //    inbox
    Mutex lock;

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.
    // Locks required: cap->lock
    Message *inbox;

    SparkPool *sparks;

    // Stats on spark creation/conversion
    SparkCounters spark_stats;
#endif
    // Total words allocated by this cap since rts start
    W_ total_allocated;

    // Per-capability STM-related data
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // We never want a Capability to overlap a cache line with anything
  // else, so round it up to a cache line size:
#ifndef mingw32_HOST_OS
  ATTRIBUTE_ALIGNED(64)
#endif
  ;
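
/* A minimal sketch (not part of the RTS, purely illustrative): on
 * platforms where ATTRIBUTE_ALIGNED(64) takes effect, the struct size is
 * rounded up to a multiple of the cache-line size, which a hypothetical
 * compile-time check could express as:
 *
 *     typedef char capability_is_cache_line_sized
 *         [ (sizeof(struct Capability_) % 64 == 0) ? 1 : -1 ];
 */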


#if defined(THREADED_RTS)
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
#else
#define ASSERT_TASK_ID(task) /*empty*/
#endif

// These properties should be true when a Task is holding a Capability
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)			\
  ASSERT(cap->running_task != NULL && cap->running_task == task);	\
  ASSERT(task->cap == cap);						\
  ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)

// Sometimes a Task holds a Capability, but the Task is not associated
// with that Capability (ie. task->cap != cap).  This happens when
// (a) a Task holds multiple Capabilities, and (b) when the current
// Task is bound, its thread has just blocked, and it may have been
// moved to another Capability.
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)	\
  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?		\
	    cap->run_queue_tl == END_TSO_QUEUE : 1);	\
  ASSERT(myTask() == task);				\
  ASSERT_TASK_ID(task);

#if defined(THREADED_RTS)
rtsBool checkSparkCountInvariant (void);
#endif

// Converts a *StgRegTable into a *Capability.
//
INLINE_HEADER Capability *
regTableToCapability (StgRegTable *reg)
{
    return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
}
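
/* A minimal usage sketch (hypothetical helper, not part of this API):
 * during STG execution BaseReg points at cap->r, so the Capability can be
 * recovered from the register table, and the conversion round-trips:
 *
 *     static Capability *capOfRegTable (StgRegTable *reg)
 *     {
 *         Capability *cap = regTableToCapability(reg);
 *         ASSERT(&cap->r == reg);  // subtracting the field offset undoes &cap->r
 *         return cap;
 *     }
 */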

// Initialise the available capabilities.
//
void initCapabilities (void);

// Add and initialise more Capabilities
//
void moreCapabilities (nat from, nat to);

// Release a capability.  This is called by a Task that is exiting
// Haskell to make a foreign call, or in various other cases when we
// want to relinquish a Capability that we currently hold.
//
// ASSUMES: cap->running_task is the current Task.
//
#if defined(THREADED_RTS)
void releaseCapability           (Capability* cap);
void releaseAndWakeupCapability  (Capability* cap);
void releaseCapability_ (Capability* cap, rtsBool always_wakeup); 
// assumes cap->lock is held
#else
// releaseCapability() is empty in non-threaded RTS
INLINE_HEADER void releaseCapability  (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseAndWakeupCapability  (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED, 
                                       rtsBool always_wakeup STG_UNUSED) {};
#endif

// declared in includes/rts/Threads.h:
// extern Capability MainCapability;

// declared in includes/rts/Threads.h:
// extern nat n_capabilities;
// extern nat enabled_capabilities;

// Array of all the capabilities
//
extern Capability **capabilities;

// The Capability that was last free.  Used as a good guess for where
// to assign new threads.
//
extern Capability *last_free_capability;

//
// Indicates that the RTS wants to synchronise all the Capabilities
// for some reason.  All Capabilities should stop and return to the
// scheduler.
//
#define SYNC_GC_SEQ 1
#define SYNC_GC_PAR 2
#define SYNC_OTHER  3
extern volatile StgWord pending_sync;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);
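
/* A simplified sketch of the intended protocol (the real code lives in the
 * scheduler and foreign-call glue, not here): a Task gives up its
 * Capability around a safe foreign call and re-acquires one afterwards,
 * preferring the Capability it held before:
 *
 *     releaseCapability(cap);               // let other Tasks run Haskell
 *     some_foreign_fn(arg);                 // hypothetical blocking call
 *     waitForReturnCapability(&cap, task);  // *cap was a preference; on
 *                                           // return it is the Capability
 *                                           // we now hold
 */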

EXTERN_INLINE void recordMutableCap (StgClosure *p, Capability *cap, nat gen);

EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);

#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
rtsBool yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed);
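
/* A minimal sketch (simplified; the real loop lives in the scheduler):
 *
 *     yieldCapability(&cap, task, rtsFalse);
 *     if (cap == NULL) {
 *         // the Capability was handed to another Task; re-acquire one
 *         // (e.g. via waitForCapability()) before running Haskell again.
 *     }
 */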

// Acquires a capability for doing some work.
//
// On return: pCap points to the capability.
//
void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
//
void prodOneCapability (void);
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
//
void prodAllCapabilities (void);

// Attempt to gain control of a Capability if it is free.
//
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
//
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
//
rtsBool anySparks (void);

INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
INLINE_HEADER nat     sparkPoolSizeCap  (Capability *cap);
INLINE_HEADER void    discardSparksCap  (Capability *cap);

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
//
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */
// Waits for a capability to drain of runnable threads and workers,
// and then acquires it.  Used at shutdown time.
//
void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);

// Shut down all capabilities.
//
void shutdownCapabilities(Task *task, rtsBool wait_foreign);

// cause all capabilities to context switch as soon as possible.
void contextSwitchAllCapabilities(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// cause all capabilities to stop running Haskell code and return to
// the scheduler as soon as possible.
void interruptAllCapabilities(void);
INLINE_HEADER void interruptCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);
// For the GC:
void markCapability (evac_fn evac, void *user, Capability *cap,
                     rtsBool no_mark_sparks USED_IF_THREADS);

void markCapabilities (evac_fn evac, void *user);
void traverseSparkQueues (evac_fn evac, void *user);
/* -----------------------------------------------------------------------------
   Messages
   -------------------------------------------------------------------------- */

#ifdef THREADED_RTS

INLINE_HEADER rtsBool emptyInbox(Capability *cap);

#endif // THREADED_RTS

/* -----------------------------------------------------------------------------
 * INLINE functions... private below here
 * -------------------------------------------------------------------------- */

EXTERN_INLINE void
recordMutableCap (StgClosure *p, Capability *cap, nat gen)
{
    bdescr *bd;

    // We must own this Capability in order to modify its mutable list.
    //    ASSERT(cap->running_task == myTask());
    // NO: assertion is violated by performPendingThrowTos()
    bd = cap->mut_lists[gen];
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
	bdescr *new_bd;
	new_bd = allocBlock_lock();
	new_bd->link = bd;
	bd = new_bd;
	cap->mut_lists[gen] = bd;
    }
    *bd->free++ = (StgWord)p;
}

EXTERN_INLINE void
recordClosureMutated (Capability *cap, StgClosure *p)
{
    bdescr *bd;
    bd = Bdescr((StgPtr)p);
    if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
}
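
/* A minimal usage sketch (assumptions: 'p' names some mutable closure we
 * have just written to; the surrounding code is hypothetical): after
 * mutating an object that may live in an old generation, record it so the
 * next GC treats it as a root for the younger generations:
 *
 *     // ... overwrite a field of the closure pointed to by p ...
 *     recordClosureMutated(cap, p);   // no-op for objects in generation 0
 */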


#if defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptySparkPoolCap (Capability *cap) 
{ return looksEmpty(cap->sparks); }

INLINE_HEADER nat
sparkPoolSizeCap (Capability *cap) 
{ return sparkPoolSize(cap->sparks); }

INLINE_HEADER void
discardSparksCap (Capability *cap) 
{ discardSparks(cap->sparks); }
#endif

INLINE_HEADER void
stopCapability (Capability *cap)
{
    // Setting HpLim to NULL makes the next heap check fail, which
    // causes the thread to return to the scheduler.  It may not work -
    // the thread might be updating HpLim itself at the same time - so
    // we also have the context_switch/interrupt flags as a sticky way
    // to tell the thread to stop.
    cap->r.rHpLim = NULL;
}

INLINE_HEADER void
interruptCapability (Capability *cap)
{
    stopCapability(cap);
    cap->interrupt = 1;
}

INLINE_HEADER void
contextSwitchCapability (Capability *cap)
{
    stopCapability(cap);
    cap->context_switch = 1;
}
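
/* A minimal sketch (assumption: this mirrors the implementation in
 * Capability.c, which is not shown in this header): asking every
 * Capability to reschedule is just this flag-setting loop:
 *
 *     void contextSwitchAllCapabilities (void)
 *     {
 *         nat i;
 *         for (i = 0; i < n_capabilities; i++) {
 *             contextSwitchCapability(capabilities[i]);
 *         }
 *     }
 */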

#ifdef THREADED_RTS

INLINE_HEADER rtsBool emptyInbox(Capability *cap)
{
    return (cap->inbox == (Message*)END_TSO_QUEUE);
}
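
/* A minimal sketch (simplified: the real scheduler detaches the whole
 * inbox under cap->lock before executing the messages):
 *
 *     while (!emptyInbox(cap)) {
 *         Message *m = cap->inbox;    // cap->lock required to modify inbox
 *         cap->inbox = m->link;
 *         executeMessage(cap, m);     // hypothetical handler name
 *     }
 */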

#endif

#include "EndPrivate.h"

#endif /* CAPABILITY_H */

// Local Variables:
// mode: C
// fill-column: 80
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End: