/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the closure to enter)
 *  + 1                       (stg_ap_v_ret)
 *  + 1                       (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    uint32_t stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size   = stack_size - sizeofW(StgStack);
    stack->sp           = stack->stack + stack->stack_size;
    stack->dirty        = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked  = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    ASSIGN_Int64((W_*)&(tso->alloc_limit), 0);

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info, CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
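
/* For illustration, this is roughly how the packaged versions mentioned
 * above (createGenThread()/createIOThread() in SchedAPI.h) build on
 * createThread(); the exact closure sequence shown here is a sketch
 * from memory, not a definition:
 *
 *     StgTSO *
 *     createIOThread (Capability *cap, W_ stack_size, StgClosure *closure)
 *     {
 *         StgTSO *t = createThread (cap, stack_size);
 *         pushClosure(t, (W_)&stg_ap_v_info);   // apply to a void argument
 *         pushClosure(t, (W_)closure);          // the IO action to run
 *         pushClosure(t, (W_)&stg_enter_info);  // enter the closure
 *         return t;
 *     }
 */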

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
  StgThreadID id1 = ((StgTSO *)tso1)->id;
  StgThreadID id2 = ((StgTSO *)tso2)->id;

  if (id1 < id2) return (-1);
  if (id1 > id2) return 1;
  return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
  return ((StgTSO *)tso)->id;
}

/* ---------------------------------------------------------------------------
 * Getting & setting the thread allocation limit
 * ------------------------------------------------------------------------ */
HsInt64 rts_getThreadAllocationCounter(StgPtr tso)
{
    // NB. doesn't take into account allocation in the current nursery
    // block, so it might be off by up to 4k.
    return PK_Int64((W_*)&(((StgTSO *)tso)->alloc_limit));
}

void rts_setThreadAllocationCounter(StgPtr tso, HsInt64 i)
{
    ASSIGN_Int64((W_*)&(((StgTSO *)tso)->alloc_limit), i);
}

void rts_enableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags |= TSO_ALLOC_LIMIT;
}

void rts_disableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags &= ~TSO_ALLOC_LIMIT;
}
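
/* Illustrative sketch (hypothetical caller, not part of this file):
 * giving a thread roughly a 1 MB allocation budget before it receives
 * the allocation-limit exception.  "tso" is assumed to point to a live
 * TSO, e.g. obtained from a ThreadId# on the Haskell side.
 *
 *     rts_setThreadAllocationCounter(tso, 1024 * 1024);
 *     rts_enableThreadAllocationLimit(tso);
 *     // ... run the thread ...
 *     rts_disableThreadAllocationLimit(tso);
 */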

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

bool // returns true if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return false;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return true;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

bool // returns true if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    bool flag = false;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = false;
            } else {
                *head = t->_link;
                flag = true;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return true;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
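
/* Typical use (sketch, assuming the scheduler's run-queue fields): the
 * scheduler removes a TSO from a capability's run queue with something
 * like
 *
 *     removeThreadFromDeQueue(cap, &cap->run_queue_hd,
 *                             &cap->run_queue_tl, tso);
 *
 * which keeps both the head and tail pointers consistent.
 */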

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.
   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now; if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}
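
/* Cross-capability wakeups above arrive at the owning capability as
 * MSG_TRY_WAKEUP messages; its message loop (executeMessage() in
 * Messages.c) then re-enters tryWakeupThread() locally, roughly:
 *
 *     MessageWakeup *msg = (MessageWakeup *)m;
 *     tryWakeupThread(cap, msg->tso);
 *
 * (sketch; see Messages.c for the real dispatch.)
 */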

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   wakes up all the threads on the specified blocking queue.
   ------------------------------------------------------------------------- */

static void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info  ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info  );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}
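
/* Summary of the case analysis above: once a thunk has been
 * blackholed, its indirectee is one of
 *   - the owning TSO itself (no threads blocked on it yet),
 *   - a BLOCKING_QUEUE owned by some TSO (threads are blocked), or
 *   - something else, meaning another thread already updated it (a
 *     collision), in which case we must scan our own blocking queues
 *     via checkBlockingQueues() instead.
 */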

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */

HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
  return HS_BOOL_TRUE;
#else
  return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */

StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
  return (tso->bound != NULL);
#endif
  return false;
}
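
/* On the Haskell side (for reference, hedged from memory): base
 * re-exports rtsSupportsBoundThreads as the value
 * Control.Concurrent.rtsSupportsBoundThreads :: Bool via a foreign
 * import, and forkOS consults it so that it can fail with a helpful
 * error message when run on a non-threaded RTS.
 */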

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));

    if (RtsFlags.GcFlags.maxStkSize > 0
        && tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize) {
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.
        if (tso->flags & TSO_SQUEEZED) {
            return;
        }

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   (long)RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Note [Throw to self when masked], also #767 and #8303.
        throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
        return;
    }

    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    //
    // NB: This reasoning only applies if the stack has been squeezed;
    // if no squeezing has occurred, then BLOCK_SIZE_W free space does
    // not mean there is enough stack to run; the thread may have
    // requested a large amount of stack (see below).  If the amount
    // we squeezed is not enough to run the thread, we'll come back
    // here (no squeezing will have occurred and thus we'll enlarge the
    // stack anyway).
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %ld bytes",
                  (long)(chunk_size * sizeof(W_)));

    // Charge the current thread for allocating stack.  Stack usage is
    // non-deterministic, because the chunk boundaries might vary from
    // run to run, but accounting for this is better than not
    // accounting for it, since a deep recursion will otherwise not be
    // subject to allocation limits.
    cap->r.rCurrentTSO = tso;
    new_stack = (StgStack*) allocate(cap, chunk_size);
    cap->r.rCurrentTSO = NULL;

    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //   * the chunk buffer size (+RTS -kb)
        //   * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk  = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}
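
/* Worked example (illustrative; assumes a 64-bit word size and the
 * default -kc32k, i.e. stkChunkSize = 4096 words): a thread that
 * overflows having used more than half of its current chunk simply
 * gets a fresh 4096-word chunk.  One that overflows while using less
 * than half must have made a single large stack request, so it gets
 * stg_max(2 * (old_size + sizeofW(StgStack)), 4096) words, doubling
 * again on each subsequent failure.
 */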

/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame
   ------------------------------------------------------------------------ */

W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    uint32_t retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy down to the previous
        // stack chunk
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
   Implementation of tryPutMVar#

   NOTE: this should be kept in sync with stg_tryPutMVarzh in PrimOps.cmm
   ------------------------------------------------------------------------- */

bool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value)
{
    const StgInfoTable *info;
    StgMVarTSOQueue *q;
    StgTSO *tso;

    info = lockClosure((StgClosure*)mvar);

    if (mvar->value != &stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
        unlockClosure((StgClosure*)mvar, info);
#endif
        return false;
    }

    q = mvar->head;
loop:
    if (q == (StgMVarTSOQueue*)&stg_END_TSO_QUEUE_closure) {
        /* No further takes, the MVar is now full. */
        if (info == &stg_MVAR_CLEAN_info) {
            dirty_MVAR(&cap->r, (StgClosure*)mvar);
        }

        mvar->value = value;
        unlockClosure((StgClosure*)mvar, &stg_MVAR_DIRTY_info);
        return true;
    }
    if (q->header.info == &stg_IND_info ||
        q->header.info == &stg_MSG_NULL_info) {
        q = (StgMVarTSOQueue*)((StgInd*)q)->indirectee;
        goto loop;
    }

    // There are takeMVar(s) waiting: wake up the first one
    tso = q->tso;
    mvar->head = q->link;
    if (mvar->head == (StgMVarTSOQueue*)&stg_END_TSO_QUEUE_closure) {
        mvar->tail = (StgMVarTSOQueue*)&stg_END_TSO_QUEUE_closure;
    }

    ASSERT(tso->block_info.closure == (StgClosure*)mvar);
    // save why_blocked here, because waking up the thread destroys
    // this information
    StgWord why_blocked = tso->why_blocked;

    // actually perform the takeMVar
    StgStack* stack = tso->stackobj;
    stack->sp[1] = (W_)value;
    stack->sp[0] = (W_)&stg_ret_p_info;

    // indicate that the MVar operation has now completed.
    tso->_link = (StgTSO*)&stg_END_TSO_QUEUE_closure;

    if (stack->dirty == 0) {
        dirty_STACK(cap, stack);
    }

    tryWakeupThread(cap, tso);

    // If it was a readMVar, then we can still do work,
    // so loop back. (XXX: This could take a while)
    if (why_blocked == BlockedOnMVarRead) {
        q = ((StgMVarTSOQueue*)q)->link;
        goto loop;
    }

    ASSERT(why_blocked == BlockedOnMVar);

    unlockClosure((StgClosure*)mvar, info);

    return true;
}
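
/* Illustrative sketch (hypothetical caller, not from this file): C code
 * in the RTS that wants to complete a tryPutMVar# on behalf of some
 * thread might do the following, where "mvar" and "val" are assumed to
 * be valid, GC-reachable closures:
 *
 *     if (performTryPutMVar(cap, mvar, val)) {
 *         // value was put; the first blocked takeMVar, if any, was woken
 *     } else {
 *         // the MVar was already full; nothing was changed
 *     }
 */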

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#ifdef DEBUG
void
printThreadBlockage(StgTSO *tso)
{
  switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
  case BlockedOnDoProc:
    debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
    break;
#endif
#if !defined(THREADED_RTS)
  case BlockedOnRead:
    debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnWrite:
    debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnDelay:
    debugBelch("is blocked until %ld", (long)(tso->block_info.target));
    break;
#endif
  case BlockedOnMVar:
    debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
    break;
  case BlockedOnMVarRead:
    debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
    break;
  case BlockedOnBlackHole:
    debugBelch("is blocked on a black hole %p",
               ((StgBlockingQueue*)tso->block_info.bh->bh));
    break;
  case BlockedOnMsgThrowTo:
    debugBelch("is blocked on a throwto message");
    break;
  case NotBlocked:
    debugBelch("is not blocked");
    break;
  case ThreadMigrating:
    debugBelch("is runnable, but not on the run queue");
    break;
  case BlockedOnCCall:
    debugBelch("is blocked on an external call");
    break;
  case BlockedOnCCall_Interruptible:
    debugBelch("is blocked on an external call (but may be interrupted)");
    break;
  case BlockedOnSTM:
    debugBelch("is blocked on an STM operation");
    break;
  default:
    barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %lu (%p)",
         tso->why_blocked, (unsigned long)tso->id, tso);
  }
}

void
printThreadStatus(StgTSO *t)
{
  debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
  {
    void *label = lookupThreadLabel(t->id);
    if (label) debugBelch("[\"%s\"] ", (char *)label);
  }
  switch (t->what_next) {
  case ThreadKilled:
    debugBelch("has been killed");
    break;
  case ThreadComplete:
    debugBelch("has completed");
    break;
  default:
    printThreadBlockage(t);
  }
  if (t->dirty) {
    debugBelch(" (TSO_DIRTY)");
  }
  debugBelch("\n");
}

void
printAllThreads(void)
{
  StgTSO *t, *next;
  uint32_t i, g;
  Capability *cap;

  debugBelch("all threads:\n");

  for (i = 0; i < n_capabilities; i++) {
      cap = capabilities[i];
      debugBelch("threads on capability %d:\n", cap->no);
      for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
          printThreadStatus(t);
      }
  }

  debugBelch("other threads:\n");
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
      if (t->why_blocked != NotBlocked) {
          printThreadStatus(t);
      }
      next = t->global_link;
    }
  }
}

// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    uint32_t i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}

#endif /* DEBUG */