/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the closure to enter)
 *  + 1                       (stg_ap_v_ret)
 *  + 1                       (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.

   ------------------------------------------------------------------------ */
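
/* For illustration only -- a sketch (not part of this file) of how a
 * packaged version such as createGenThread() uses this function,
 * assuming the helpers named above:
 *
 *     StgTSO *t = createThread(cap, stack_size);
 *     pushClosure(t, (W_)closure);           // the closure to enter
 *     pushClosure(t, (W_)&stg_enter_info);   // frame that enters it
 *     return t;
 */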
StgTSO *
createThread(Capability *cap, nat size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, CCS_SYSTEM);
    stack->stack_size   = stack_size - sizeofW(StgStack);
    stack->sp           = stack->stack + stack->stack_size;
    stack->dirty        = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked  = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;
    
    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif
    
    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);
    
    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
  StgThreadID id1 = ((StgTSO *)tso1)->id;
  StgThreadID id2 = ((StgTSO *)tso2)->id;
 
  if (id1 < id2) return (-1);
  if (id1 > id2) return 1;
  return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso) 
{
  return ((StgTSO *)tso)->id;
}

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

rtsBool // returns rtsTrue if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap, 
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
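
/* For illustration only -- a sketch of calling the dequeue variant on
 * a queue with explicit head and tail pointers (hypothetical local
 * names, not from this file):
 *
 *     StgTSO *head, *tail;   // queue threaded through tso->_link
 *     ...
 *     if (removeThreadFromDeQueue(cap, &head, &tail, tso)) {
 *         // head and/or tail were updated
 *     }
 */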

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.

   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (lnat)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    {
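        // The MVar code resets the TSO's _link to END_TSO_QUEUE once
        // the blocked operation has been performed on its behalf
        // (see stg_putMVarzh in PrimOps.cmm); until then the thread
        // is still on the MVar's queue and must not be woken.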
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
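        // A throwto message is revoked by overwriting its info pointer
        // with stg_MSG_NULL_info (see doneWithMsgThrowTo() in
        // RaiseAsync.c).  Locking and immediately unlocking the
        // message synchronises us with a concurrent revoker: a
        // non-NULL info pointer afterwards means the message is still
        // pending and the thread is genuinely blocked.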
        const StgInfoTable *i;
        
        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (lnat)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   wakes up all the threads blocked on the specified blocking queue.
   ------------------------------------------------------------------------- */

void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info  ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info  );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE; 
         msg = msg->link) {
        i = msg->header.info;
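        // a message that has already been revoked (e.g. because the
        // blocked thread received an exception) was overwritten with
        // an IND; skip those, anything else must be a MSG_BLACKHOLE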
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (lnat)tso->id);
    
    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }
        
        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }   
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */
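
/* A sketch of the case analysis below.  If the thunk is (or was) a
 * BLACKHOLE, its indirectee tells us who else is involved:
 *
 *    a TSO             -- owned by that thread, no blocked threads yet
 *    a BLOCKING_QUEUE  -- owned, with a queue of blocked threads
 *    anything else     -- it was already updated by somebody else
 *
 * Whenever the owner turns out not to be the updating thread, there
 * may be orphaned BLOCKING_QUEUEs to deal with, hence the calls to
 * checkBlockingQueues().
 */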

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }
    
    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    i = v->header.info;
    if (i == &stg_TSO_info) {
        owner = (StgTSO*)v;
        if (owner != tso) {
            checkBlockingQueues(cap, tso);
        }
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */
 
HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
  return HS_BOOL_TRUE;
#else
  return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */
 
StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
  return (tso->bound != NULL);
#endif
  return rtsFalse;
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    lnat chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));

    if (tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize
        && !(tso->flags & TSO_BLOCKEX)) {
        // NB. never raise a StackOverflow exception if the thread is
        // inside Control.Exception.block.  It is impractical to protect
        // against stack overflow exceptions, since virtually anything
        // can raise one (even 'catch'), so this is the only sensible
        // thing to do here.  See bug #767.
        //

        if (tso->flags & TSO_SQUEEZED) {
            return;
        }
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Send this thread the StackOverflow exception
        throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
    }


    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    if ((tso->flags & TSO_SQUEEZED) && 
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = 2 * (old_stack->stack_size + sizeofW(StgStack));
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }
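
    // For example (a sketch, assuming the stock 32k default chunk
    // size mentioned below): a thread that keeps failing large stack
    // checks gets chunks of 32k, 64k, 128k, ..., so the number of
    // overflows for a very deep stack is only logarithmic in the
    // final stack size.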

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %d bytes",
Simon Marlow's avatar
Simon Marlow committed
570
                  chunk_size * sizeof(W_));

    new_stack = (StgStack*) allocate(cap, chunk_size);
    SET_HDR(new_stack, &stg_STACK_info, CCS_SYSTEM);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        nat chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //   * the chunk buffer size (+RTS -kb)
        //   * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk  = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}


/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame_info frame
   ------------------------------------------------------------------------ */
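
/* A sketch of the old chunk's layout on entry (the underflow frame
 * always sits at the very top of a chunk):
 *
 *    old_stack->sp -> | return values (retvals words) |
 *                     | UNDERFLOW_FRAME: next_chunk   |  --> new_stack
 *                     +--- old_stack->stack + old_stack->stack_size
 */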

nat // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    nat retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((nat)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if DEBUG
void
printThreadBlockage(StgTSO *tso)
{
  switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
  case BlockedOnDoProc:
    debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
    break;
#endif
#if !defined(THREADED_RTS)
  case BlockedOnRead:
    debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnWrite:
    debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnDelay:
    debugBelch("is blocked until %ld", (long)(tso->block_info.target));
    break;
#endif
  case BlockedOnMVar:
    debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
    break;
  case BlockedOnBlackHole:
    debugBelch("is blocked on a black hole %p",
               ((StgBlockingQueue*)tso->block_info.bh->bh));
    break;
  case BlockedOnMsgThrowTo:
    debugBelch("is blocked on a throwto message");
    break;
  case NotBlocked:
    debugBelch("is not blocked");
    break;
  case ThreadMigrating:
    debugBelch("is runnable, but not on the run queue");
    break;
  case BlockedOnCCall:
    debugBelch("is blocked on an external call");
    break;
  case BlockedOnCCall_Interruptible:
    debugBelch("is blocked on an external call (but may be interrupted)");
    break;
  case BlockedOnSTM:
    debugBelch("is blocked on an STM operation");
    break;
  default:
    barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
	 tso->why_blocked, tso->id, tso);
  }
}


void
printThreadStatus(StgTSO *t)
{
  debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
765 766 767 768
    {
      void *label = lookupThreadLabel(t->id);
      if (label) debugBelch("[\"%s\"] ",(char *)label);
    }
769
        switch (t->what_next) {
770 771 772 773 774 775 776 777 778
	case ThreadKilled:
	    debugBelch("has been killed");
	    break;
	case ThreadComplete:
	    debugBelch("has completed");
	    break;
	default:
	    printThreadBlockage(t);
	}
779
        if (t->dirty) {
780 781
            debugBelch(" (TSO_DIRTY)");
        }
782 783 784 785 786 787 788
	debugBelch("\n");
}

void
printAllThreads(void)
{
  StgTSO *t, *next;
  nat i, g;
  Capability *cap;

  debugBelch("all threads:\n");

  for (i = 0; i < n_capabilities; i++) {
      cap = &capabilities[i];
      debugBelch("threads on capability %d:\n", cap->no);
      for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
          printThreadStatus(t);
      }
  }

  debugBelch("other threads:\n");
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
      if (t->why_blocked != NotBlocked) {
          printThreadStatus(t);
      }
      next = t->global_link;
    }
  }
}

// useful from gdb
void 
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}

#endif /* DEBUG */