/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2009
 *
 * The definitions for Thread State Objects.
 *
 * ---------------------------------------------------------------------------*/

#pragma once

/*
 * PROFILING info in a TSO
 */
typedef struct {
  CostCentreStack *cccs;       /* thread's current CCS */
} StgTSOProfInfo;

/*
 * There is no TICKY info in a TSO at this time.
 */

/*
 * Thread IDs are 32 bits.
 */
typedef StgWord32 StgThreadID;

#define tsoLocked(tso) ((tso)->flags & TSO_LOCKED)

/*
 * Type returned after running a thread.  Values of this type
 * include HeapOverflow, StackOverflow etc.  See Constants.h for the
 * full list.
 */
typedef unsigned int StgThreadReturnCode;
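
#if 0   /* illustrative sketch, not compiled */
/* Schematic dispatch on a thread's return code, of the kind the scheduler
 * performs after running a thread.  The constants (HeapOverflow,
 * StackOverflow, ThreadYielding, ThreadBlocked, ThreadFinished) are the ones
 * referred to above and live in Constants.h; the comments summarise the usual
 * handling in outline only and are an assumption of this sketch, not a
 * specification of the real scheduler. */
static void example_handle_return_code (StgThreadReturnCode ret)
{
    switch (ret) {
    case HeapOverflow:   break;  /* garbage collect, then resume the thread */
    case StackOverflow:  break;  /* grow the stack or raise StackOverflow   */
    case ThreadYielding: break;  /* put the thread back on the run queue    */
    case ThreadBlocked:  break;  /* parked; see why_blocked / block_info    */
    case ThreadFinished: break;  /* the thread is now a zombie              */
    default:             break;
    }
}
#endif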

#if defined(mingw32_HOST_OS)
/* results from an async I/O request + its request ID. */
typedef struct {
  unsigned int reqID;
  int          len;
  int          errCode;
} StgAsyncIOResult;
#endif

/* Reason for thread being blocked. See the comment above struct StgTSO_. */
typedef union {
  StgClosure *closure;
  StgTSO *prev; // a back-link when the TSO is on the run queue (NotBlocked)
  struct MessageBlackHole_ *bh;
  struct MessageThrowTo_ *throwto;
  struct MessageWakeup_  *wakeup;
  StgInt fd;    /* StgInt instead of int, so that it's the same size as the ptrs */
#if defined(mingw32_HOST_OS)
  StgAsyncIOResult *async_result;
#endif
#if !defined(THREADED_RTS)
  StgWord target;
    // Only for the non-threaded RTS: the target time for a thread
    // blocked in threadDelay, in units of 1ms.  This is a
    // compromise: we don't want to take up much space in the TSO.  If
    // you want better resolution for threadDelay, use -threaded.
#endif
} StgTSOBlockInfo;


/*
 * TSOs live on the heap, and therefore look just like heap objects.
 * Large TSOs will live in their own "block group" allocated by the
 * storage manager, and won't be copied during garbage collection.
 */

/*
 * Threads may be blocked for several reasons.  A blocked thread will
 * have the reason in the why_blocked field of the TSO, and some
 * further info (such as the closure the thread is blocked on, or the
 * file descriptor if the thread is waiting on I/O) in the block_info
 * field.
 */

typedef struct StgTSO_ {
    StgHeader               header;

    /* The link field, for linking threads together in lists (e.g. the
       run queue on a Capability).
    */
    struct StgTSO_*         _link;
    /*
      Currently used for linking TSOs on:
      * cap->run_queue_{hd,tl}
      * the blocked_queue (non-THREADED_RTS only)
      * and pointing to the next chunk for a ThreadOldStack

       NOTE!!!  Do not modify _link directly; it is subject to a write
       barrier for generational GC.  Instead use the setTSOLink()
       function (see the illustrative sketch after this struct).
       Exceptions to this rule are:

       * setting the link field to END_TSO_QUEUE
       * setting the link field of the currently running TSO, as it
         will already be dirty.
    */

    struct StgTSO_*         global_link;    // Links threads on the
                                            // generation->threads lists

    /*
     * The thread's stack
     */
    struct StgStack_       *stackobj;

    /*
     * The tso->dirty flag indicates that this TSO's stack should be
     * scanned during garbage collection.  It also indicates that this
     * TSO is on the mutable list.
     *
     * NB. The dirty flag gets a word to itself, so that it can be set
     * safely by multiple threads simultaneously (the flags field is
     * not safe for this purpose; see #3429).  It is harmless for the
     * TSO to be on the mutable list multiple times.
     *
     * tso->dirty is set by dirty_TSO(), and unset by the garbage
     * collector (only).
     */

    StgWord16               what_next;      // Values defined in Constants.h
    StgWord16               why_blocked;    // Values defined in Constants.h
    StgWord32               flags;          // Values defined in Constants.h
    StgTSOBlockInfo         block_info;
    StgThreadID             id;
    StgWord32               saved_errno;
    StgWord32               dirty;          /* non-zero => dirty */
    struct InCall_*         bound;
    struct Capability_*     cap;

    struct StgTRecHeader_ * trec;       /* STM transaction record */

    /*
     * A list of threads blocked on this TSO waiting to throw exceptions.
    */
    struct MessageThrowTo_ * blocked_exceptions;

    /*
     * A list of StgBlockingQueue objects, representing threads
     * blocked on thunks that are under evaluation by this thread.
    */
    struct StgBlockingQueue_ *bq;

    /*
     * The allocation limit for this thread, which is updated as the
     * thread allocates.  If the value drops below zero, and
     * TSO_ALLOC_LIMIT is set in flags, we raise an exception in the
     * thread, and give the thread a little more space to handle the
     * exception before we raise the exception again.
     *
     * This is an integer, because we might update it in a place where
     * it isn't convenient to raise the exception, so we want it to
     * stay negative until we get around to checking it.
     *
     * Use only the PK_Int64/ASSIGN_Int64 macros to get/set the value of
     * alloc_limit in C code; otherwise you will cause alignment issues on
     * SPARC.
     */
    StgInt64  alloc_limit;     /* in bytes */

    /*
     * sum of the sizes of all stack chunks (in words), used to decide
     * whether to throw the StackOverflow exception when the stack
     * overflows, or whether to just chain on another stack chunk.
     *
     * Note that this overestimates the real stack size, because each
     * chunk will have a gap at the end, of +RTS -kb<size> words.
     * This means stack overflows are not entirely accurate, because
     * the more gaps there are, the sooner the stack will run into the
     * hard +RTS -K<size> limit.
     */
    StgWord32  tot_stack_size;

#if defined(TICKY_TICKY)
    /* TICKY-specific stuff would go here. */
#endif
#if defined(PROFILING)
    StgTSOProfInfo prof;
#endif
#if defined(mingw32_HOST_OS)
    StgWord32 saved_winerror;
#endif

} *StgTSOPtr; // StgTSO defined in rts/Types.h
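
#if 0   /* illustrative sketch, not compiled */
/* Example of the _link discipline described in the NOTE above (and referenced
 * from it): appending a thread to a Capability's run queue.  This is a sketch
 * of the idea only, not the RTS's real enqueue code (see appendToRunQueue in
 * the scheduler for that); the run_queue_hd/run_queue_tl fields of Capability
 * are assumed visible here, although they live in the RTS-internal
 * Capability.h.  Plain stores are fine for END_TSO_QUEUE, while linking one
 * TSO to another must go through the barrier-aware setTSOLink(). */
static void example_append_to_run_queue (Capability *cap, StgTSO *tso)
{
    tso->_link = END_TSO_QUEUE;              // allowed: storing END_TSO_QUEUE
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;             // queue was empty
    } else {
        // ordinary link update: subject to the generational-GC write barrier
        setTSOLink(cap, cap->run_queue_tl, tso);
    }
    cap->run_queue_tl = tso;
}
#endif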

/* Note [StgStack dirtiness flags and concurrent marking]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Without concurrent collection by the nonmoving collector the stack dirtiness story
 * is quite simple: The stack is either STACK_DIRTY (meaning it has been added to mut_list)
 * or not.
 *
 * However, things are considerably more complicated with concurrent collection
 * (namely, when nonmoving_write_barrier_enabled is set): In addition to adding
 * the stack to mut_list and flagging it as STACK_DIRTY, we also must ensure
 * that stacks are marked in accordance with the nonmoving collector's snapshot
 * invariant. This is: every stack alive at the time the snapshot is taken must
 * be marked at some point after the moment the snapshot is taken and before it
 * is mutated or the commencement of the sweep phase.
 *
 * This marking may be done by the concurrent mark phase (in the case of a
 * thread that never runs during the concurrent mark) or by the mutator when
 * dirtying the stack. However, it is unsafe for the concurrent collector to
 * traverse the stack while it is under mutation. Consequently, the following
 * handshake is obeyed by the mutator's write barrier and the concurrent mark to
 * ensure this doesn't happen:
 *
 * 1. The entity seeking to mark first checks that the stack lives in the nonmoving
 *    generation; if not then the stack was not alive at the time the snapshot
 *    was taken and therefore we need not mark it.
 *
 * 2. The entity seeking to mark checks the stack's mark bit. If it is set then
 *    no mark is necessary.
 *
 * 3. The entity seeking to mark tries to lock the stack for marking by
 *    atomically setting its `marking` field to the current non-moving mark
 *    epoch:
 *
 *    a. If the mutator finds the concurrent collector has already locked the
 *       stack then it waits until it is finished (indicated by the mark bit
 *       being set) before proceeding with execution.
 *
 *    b. If the concurrent collector finds that the mutator has locked the stack
 *       then it moves on, leaving the mutator to mark it. There is no need to wait;
 *       the mark is guaranteed to finish before sweep due to the post-mark
 *       synchronization with mutators.
 *
 *    c. Whoever succeeds in locking the stack is responsible for marking it and
 *       setting the stack's mark bit (either the BF_MARKED bit for large objects
 *       or otherwise its bit in its segment's mark bitmap).
 *
 * To ensure that mutation does not proceed until the stack is fully marked,
 * the mark phase must not set the mark bit until it has finished tracing.
 *
 */

#define STACK_DIRTY 1
// used by sanity checker to verify that all dirty stacks are on the mutable list
#define STACK_SANE 64

typedef struct StgStack_ {
    StgHeader  header;
    StgWord32  stack_size;     // stack size in *words*
    StgWord8   dirty;          // non-zero => dirty
    StgWord8   marking;        // non-zero => someone is currently marking the stack
    StgPtr     sp;             // current stack pointer
    StgWord    stack[];
} StgStack;

// Calculate SpLim from a TSO (reads tso->stackobj, but no fields from
// the stackobj itself).
INLINE_HEADER StgPtr tso_SpLim (StgTSO* tso)
{
    return tso->stackobj->stack + RESERVED_STACK_WORDS;
}
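
#if 0   /* illustrative sketch, not compiled */
/* A sketch of the locking handshake from Note [StgStack dirtiness flags and
 * concurrent marking] above.  This is not RTS code: stack_in_nonmoving_gen(),
 * stack_mark_bit_set(), set_stack_mark_bit() and trace_stack() are
 * hypothetical stand-ins for the nonmoving collector's internals, and the
 * GCC/Clang __atomic builtin stands in for the RTS's own CAS primitive. */
static void example_mark_stack (StgStack *stack, StgWord8 mark_epoch,
                                bool is_mutator)
{
    // Step 1: a stack outside the nonmoving generation is not in the snapshot.
    if (!stack_in_nonmoving_gen(stack)) return;

    // Step 2: already marked this cycle, nothing more to do.
    if (stack_mark_bit_set(stack)) return;

    // Step 3: try to lock the stack by CAS'ing `marking` to the current epoch.
    StgWord8 seen = stack->marking;
    if (seen != mark_epoch &&
        __atomic_compare_exchange_n(&stack->marking, &seen, mark_epoch,
                                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        // Step 3c: we won the lock; mark the stack, then set its mark bit.
        trace_stack(stack);
        set_stack_mark_bit(stack);
    } else if (is_mutator) {
        // Step 3a: the collector holds the lock; wait for the mark bit to be
        // set before mutating the stack.
        while (!stack_mark_bit_set(stack)) { /* spin or yield */ }
    }
    // Step 3b: the collector simply moves on when the mutator holds the lock.
}
#endif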

/* -----------------------------------------------------------------------------
   functions
   -------------------------------------------------------------------------- */

void dirty_TSO  (Capability *cap, StgTSO *tso);
void setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target);
void setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target);

void dirty_STACK (Capability *cap, StgStack *stack);

/* -----------------------------------------------------------------------------
   Invariants:

   An active thread has the following properties:

      tso->stack < tso->sp < tso->stack+tso->stack_size
      tso->stack_size <= tso->max_stack_size

      RESERVED_STACK_WORDS is large enough for any heap-check or
      stack-check failure.

      The size of the TSO struct plus the stack is either
        (a) smaller than a block, or
        (b) a multiple of BLOCK_SIZE

        tso->why_blocked       tso->block_info      location
        ----------------------------------------------------------------------
        NotBlocked             END_TSO_QUEUE        runnable_queue, or running

        BlockedOnBlackHole     MessageBlackHole *   TSO->bq

        BlockedOnMVar          the MVAR             the MVAR's queue

        BlockedOnSTM           END_TSO_QUEUE        STM wait queue(s)
        BlockedOnSTM           STM_AWOKEN           run queue

        BlockedOnMsgThrowTo    MessageThrowTo *     TSO->blocked_exceptions

        BlockedOnRead          NULL                 blocked_queue
        BlockedOnWrite         NULL                 blocked_queue
        BlockedOnDelay         NULL                 blocked_queue

      tso->link == END_TSO_QUEUE, if the thread is currently running.

   A zombie thread has the following properties:

      tso->what_next == ThreadComplete or ThreadKilled
      tso->link     ==  (could be on some queue somewhere)
      tso->sp       ==  tso->stack + tso->stack_size - 1 (i.e. top stack word)
      tso->sp[0]    ==  return value of thread, if what_next == ThreadComplete,
                        exception             , if what_next == ThreadKilled

      (tso->sp is left pointing at the top word on the stack so that
      the return value or exception will be retained by a GC).

 ---------------------------------------------------------------------------- */
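
#if 0   /* illustrative sketch, not compiled */
/* Which member of tso->block_info is meaningful is determined by
 * tso->why_blocked, per the table above.  The case bodies are schematic
 * comments summarising that table plus the StgTSOBlockInfo union; the
 * constants come from Constants.h. */
static void example_inspect_block_info (StgTSO *tso)
{
    switch (tso->why_blocked) {
    case NotBlocked:
        // block_info.prev back-links the run queue; _link is the forward link
        break;
    case BlockedOnBlackHole:
        // block_info.bh is the MessageBlackHole; the thread sits on a TSO->bq
        break;
    case BlockedOnMVar:
        // block_info.closure is the MVAR; the thread is on the MVAR's queue
        break;
    case BlockedOnSTM:
        // block_info is END_TSO_QUEUE (STM wait queues) or STM_AWOKEN (run queue)
        break;
    case BlockedOnMsgThrowTo:
        // block_info.throwto is the MessageThrowTo message
        break;
    case BlockedOnRead:
    case BlockedOnWrite:
        // non-threaded RTS: block_info.fd is the file descriptor being waited on
        break;
    case BlockedOnDelay:
        // non-threaded RTS: the thread sleeps until the `target` time
        break;
    default:
        break;
    }
}
#endif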

/* this is the NIL ptr for a TSO queue (e.g. runnable queue) */
#define END_TSO_QUEUE  ((StgTSO *)(void*)&stg_END_TSO_QUEUE_closure)
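
#if 0   /* illustrative sketch, not compiled */
/* END_TSO_QUEUE is the sentinel that terminates TSO queues, so walking a
 * queue (e.g. a run queue threaded through _link) follows _link until the
 * sentinel is reached.  The counting here is just a stand-in for per-thread
 * work; note that *reading* _link directly is fine, it is only writes that
 * must go through setTSOLink() (see the NOTE on _link above). */
static StgWord example_count_queue (StgTSO *queue_head)
{
    StgWord n = 0;
    for (StgTSO *t = queue_head; t != END_TSO_QUEUE; t = t->_link) {
        n++;
    }
    return n;
}
#endif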