/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the closure to enter)
 *  + 1                       (stg_ap_v_ret)
 *  + 1                       (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   ------------------------------------------------------------------------ */
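/* For example (a sketch; see createIOThread() for the authoritative
 * version), an IO action is set up to run roughly as follows:
 *
 *     t = createThread (cap, stack_size);
 *     pushClosure(t, (W_)&stg_ap_v_info);
 *     pushClosure(t, (W_)closure);
 *     pushClosure(t, (W_)&stg_enter_info);
 *
 * after which the new thread can be handed to the scheduler with
 * scheduleThread(cap, t).
 */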
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size   = stack_size - sizeofW(StgStack);
    stack->sp           = stack->stack + stack->stack_size;
    stack->dirty        = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked  = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    ASSIGN_Int64((W_*)&(tso->alloc_limit), 0);

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
  StgThreadID id1 = ((StgTSO *)tso1)->id;
  StgThreadID id2 = ((StgTSO *)tso2)->id;

  if (id1 < id2) return (-1);
  if (id1 > id2) return 1;
  return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
  return ((StgTSO *)tso)->id;
}

/* ---------------------------------------------------------------------------
 * Getting & setting the thread allocation limit
 * ------------------------------------------------------------------------ */
HsInt64 rts_getThreadAllocationCounter(StgPtr tso)
{
    // NB. doesn't take into account allocation in the current nursery
    // block, so it might be off by up to 4k.
    return PK_Int64((W_*)&(((StgTSO *)tso)->alloc_limit));
}

void rts_setThreadAllocationCounter(StgPtr tso, HsInt64 i)
{
    ASSIGN_Int64((W_*)&(((StgTSO *)tso)->alloc_limit), i);
}
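
/* Turn the allocation limit for a thread on or off.  These only toggle
 * the TSO_ALLOC_LIMIT flag; the counter itself is set separately with
 * rts_setThreadAllocationCounter(). */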

void rts_enableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags |= TSO_ALLOC_LIMIT;
}

void rts_disableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags &= ~TSO_ALLOC_LIMIT;
}

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

rtsBool // returns True if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.

   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
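        // If the TSO's link field is END_TSO_QUEUE, the MVar operation
        // has already removed this thread from the MVar's queue, so it
        // is safe to run it; otherwise it is still blocked on the MVar
        // and must be left where it is.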
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   wakes up all the threads on the specified queue.
   ------------------------------------------------------------------------- */

void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info  ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info  );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
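        // A message that has been overwritten with an IND has
        // presumably been invalidated after being queued (the blocked
        // thread was woken or killed by some other means), so there is
        // nothing left to wake here.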
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */

HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
  return HS_BOOL_TRUE;
#else
  return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */

StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
  return (tso->bound != NULL);
#endif
  return rtsFalse;
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));

    if (RtsFlags.GcFlags.maxStkSize > 0
        && tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize) {
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.
        if (tso->flags & TSO_SQUEEZED) {
            return;
        }

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Note [Throw to self when masked], also #767 and #8303.
        throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
    }


    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    //
    // NB: This reasoning only applies if the stack has been squeezed;
    // if no squeezing has occurred, then BLOCK_SIZE_W free space does
    // not mean there is enough stack to run; the thread may have
    // requested a large amount of stack (see below).  If the amount
    // we squeezed is not enough to run the thread, we'll come back
    // here (no squeezing will have occurred and thus we'll enlarge the
    // stack.)
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
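    // As a rough illustration, with the default 32k chunk size (-kc):
    // a frame too large for the current chunk gets a 64k chunk next;
    // if it still does not fit, the chunk after that is 128k, and so
    // on, doubling until the frame fits.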
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %" FMT_Word " bytes",
                  (W_)(chunk_size * sizeof(W_)));

    new_stack = (StgStack*) allocate(cap, chunk_size);
    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //   * the chunk buffer size (+RTS -kb)
        //   * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk  = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}


/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame
   ------------------------------------------------------------------------ */

W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    nat retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
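    // retvals counts the words sitting between the old sp and the
    // underflow frame: return values that must be carried over to the
    // stack chunk we are returning to.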
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if DEBUG
void
printThreadBlockage(StgTSO *tso)
{
  switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
  case BlockedOnDoProc:
    debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
    break;
#endif
#if !defined(THREADED_RTS)
  case BlockedOnRead:
    debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnWrite:
    debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnDelay:
    debugBelch("is blocked until %ld", (long)(tso->block_info.target));
    break;
#endif
  case BlockedOnMVar:
    debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
    break;
  case BlockedOnMVarRead:
    debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
    break;
  case BlockedOnBlackHole:
    debugBelch("is blocked on a black hole %p",
               ((StgBlockingQueue*)tso->block_info.bh->bh));
    break;
  case BlockedOnMsgThrowTo:
    debugBelch("is blocked on a throwto message");
    break;
  case NotBlocked:
    debugBelch("is not blocked");
    break;
  case ThreadMigrating:
    debugBelch("is runnable, but not on the run queue");
    break;
  case BlockedOnCCall:
    debugBelch("is blocked on an external call");
    break;
  case BlockedOnCCall_Interruptible:
    debugBelch("is blocked on an external call (but may be interrupted)");
    break;
  case BlockedOnSTM:
    debugBelch("is blocked on an STM operation");
    break;
  default:
    barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %lu (%p)",
         tso->why_blocked, (unsigned long)tso->id, (void *)tso);
  }
}

void
printThreadStatus(StgTSO *t)
{
  debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
  {
    void *label = lookupThreadLabel(t->id);
    if (label) debugBelch("[\"%s\"] ",(char *)label);
  }
  switch (t->what_next) {
  case ThreadKilled:
    debugBelch("has been killed");
    break;
  case ThreadComplete:
    debugBelch("has completed");
    break;
  default:
    printThreadBlockage(t);
  }
  if (t->dirty) {
    debugBelch(" (TSO_DIRTY)");
  }
  debugBelch("\n");
}

void
printAllThreads(void)
{
  StgTSO *t, *next;
  nat i, g;
  Capability *cap;

  debugBelch("all threads:\n");

  for (i = 0; i < n_capabilities; i++) {
      cap = capabilities[i];
      debugBelch("threads on capability %d:\n", cap->no);
      for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
          printThreadStatus(t);
      }
  }

  debugBelch("other threads:\n");
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
      if (t->why_blocked != NotBlocked) {
          printThreadStatus(t);
      }
      next = t->global_link;
    }
  }
}

// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}

#endif /* DEBUG */