/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2004
 *
 * Exception support
 *
 * This file is written in a subset of C--, extended with various
 * features specific to GHC.  It is compiled by GHC directly.  For the
 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
 *
 * ---------------------------------------------------------------------------*/

#include "Cmm.h"
#include "RaiseAsync.h"

import ghczmprim_GHCziTypes_True_closure;

/* -----------------------------------------------------------------------------
   Exception Primitives

   A thread can request that asynchronous exceptions not be delivered
   ("masked") for the duration of an I/O computation.  The primitives

        maskAsyncExceptions# :: IO a -> IO a

   and

        maskUninterruptible# :: IO a -> IO a

   are used for this purpose.  During a masked section, asynchronous
   exceptions may be unmasked again temporarily:

        unmaskAsyncExceptions# :: IO a -> IO a

   Furthermore, asynchronous exceptions are masked automatically during
   the execution of an exception handler.  All three of these primitives
   leave a continuation on the stack which reverts to the previous
   state (masked interruptible, masked non-interruptible, or unmasked)
   on exit.

   A thread which wants to raise an exception in another thread (using
   killThread#) must block until the target thread is ready to receive
   it.  The action of unmasking exceptions in a thread will release all
   the threads waiting to deliver exceptions to that thread.

   NB. there's a bug in here.  If a thread is inside an
   unsafePerformIO, and inside maskAsyncExceptions# (there is an
   unmaskAsyncExceptions_ret on the stack), and it is blocked in an
   interruptible operation, and it receives an exception, then the
   unsafePerformIO thunk will be updated with a stack object
   containing the unmaskAsyncExceptions_ret frame.  Later, when
   someone else evaluates this thunk, the original masking state is
   not restored.

   -------------------------------------------------------------------------- */
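
/* For reference, a sketch of how these primitives are wrapped at the
 * Haskell level (simplified from base's GHC.IO; the exact definitions
 * there may differ between versions):
 *
 *   block :: IO a -> IO a
 *   block (IO io) = IO $ maskAsyncExceptions# io
 *
 *   unsafeUnmask :: IO a -> IO a
 *   unsafeUnmask (IO io) = IO $ unmaskAsyncExceptions# io
 *
 * Control.Exception's mask builds on these: it reads the current
 * masking state and hands the user a "restore" action that reinstates
 * that state inside the masked section.
 */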

INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL, W_ info_ptr)
    /* explicit stack */
{
    CInt r;

    P_ ret;
    ret = R1;

    StgTSO_flags(CurrentTSO) = %lobits32(
      TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE));

    /* Eagerly raise a masked exception, if there is one */
    if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) {

        STK_CHK_P_LL (WDS(2), stg_unmaskAsyncExceptionszh_ret_info, R1);
        /*
         * We have to be very careful here, as in killThread#, since
         * we are about to raise an async exception in the current
         * thread, which might result in the thread being killed.
         */
        Sp_adj(-2);
        Sp(1) = ret;
        Sp(0) = stg_ret_p_info;
        SAVE_THREAD_STATE();
        (r) = ccall maybePerformBlockedException (MyCapability() "ptr",
                                                  CurrentTSO "ptr");
        if (r != 0::CInt) {
            if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
                jump stg_threadFinished [];
            } else {
                LOAD_THREAD_STATE();
                ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
                R1 = ret;
                jump %ENTRY_CODE(Sp(0)) [R1];
            }
        } else {
            /*
             * The thread might have been removed from the
             * blocked_exceptions list by someone else in the meantime.
             * Just restore the stack pointer and continue.
             */
            Sp_adj(2);
        }
    }

    Sp_adj(1);
    R1 = ret;
    jump %ENTRY_CODE(Sp(0)) [R1];
}

INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL, W_ info_ptr)
    return (P_ ret)
{
    StgTSO_flags(CurrentTSO) = %lobits32(
        TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX | TSO_INTERRUPTIBLE);

    return (ret);
}

INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL, W_ info_ptr)
    return (P_ ret)
{
    StgTSO_flags(CurrentTSO) = %lobits32(
        (TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX) & ~TSO_INTERRUPTIBLE);

    return (ret);
}

stg_maskAsyncExceptionszh /* explicit stack */
{
    /* Args: R1 :: IO a */
    STK_CHK_P_LL (WDS(1)/* worst case */, stg_maskAsyncExceptionszh, R1);

    if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
        /* avoid growing the stack unnecessarily */
        if (Sp(0) == stg_maskAsyncExceptionszh_ret_info) {
            Sp_adj(1);
        } else {
            Sp_adj(-1);
            Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
        }
    } else {
        if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) == 0) {
            Sp_adj(-1);
            Sp(0) = stg_maskUninterruptiblezh_ret_info;
        }
    }

    StgTSO_flags(CurrentTSO) = %lobits32(
        TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX | TSO_INTERRUPTIBLE);

    TICK_UNKNOWN_CALL();
    TICK_SLOW_CALL_fast_v();
    jump stg_ap_v_fast [R1];
}

stg_maskUninterruptiblezh /* explicit stack */
{
    /* Args: R1 :: IO a */
    STK_CHK_P_LL (WDS(1)/* worst case */, stg_maskUninterruptiblezh, R1);

    if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
        /* avoid growing the stack unnecessarily */
        if (Sp(0) == stg_maskUninterruptiblezh_ret_info) {
            Sp_adj(1);
        } else {
            Sp_adj(-1);
            Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
        }
    } else {
        if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) {
            Sp_adj(-1);
            Sp(0) = stg_maskAsyncExceptionszh_ret_info;
        }
    }

    StgTSO_flags(CurrentTSO) = %lobits32(
        (TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX) & ~TSO_INTERRUPTIBLE);

    TICK_UNKNOWN_CALL();
    TICK_SLOW_CALL_fast_v();
    jump stg_ap_v_fast [R1];
}
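
/* A sketch of the Haskell-level wrapper for this primitive (simplified
 * from base's GHC.IO; may differ between versions):
 *
 *   blockUninterruptible :: IO a -> IO a
 *   blockUninterruptible (IO io) = IO $ maskUninterruptible# io
 *
 * Control.Exception's uninterruptibleMask is defined in terms of this
 * wrapper plus the current masking state.
 */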

stg_unmaskAsyncExceptionszh /* explicit stack */
{
    CInt r;
    W_ level;

    /* Args: R1 :: IO a */
    P_ io;
    io = R1;

    STK_CHK_P_LL (WDS(4), stg_unmaskAsyncExceptionszh, io);
    /* 4 words: one for the unmask frame, 3 for setting up the
     * stack to call maybePerformBlockedException() below.
     */

    /* If exceptions are already unmasked, there's nothing to do */
    if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) {

        /* avoid growing the stack unnecessarily */
        if (Sp(0) == stg_unmaskAsyncExceptionszh_ret_info) {
            Sp_adj(1);
        } else {
            Sp_adj(-1);
            if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) {
                Sp(0) = stg_maskAsyncExceptionszh_ret_info;
            } else {
                Sp(0) = stg_maskUninterruptiblezh_ret_info;
            }
        }

        StgTSO_flags(CurrentTSO) = %lobits32(
            TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE));

        /* Eagerly raise a masked exception, if there is one */
        if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) {
            /*
             * We have to be very careful here, as in killThread#, since
             * we are about to raise an async exception in the current
             * thread, which might result in the thread being killed.
             *
             * Now, if we are to raise an exception in the current
             * thread, there might be an update frame above us on the
             * stack due to unsafePerformIO.  Hence, the stack must
             * make sense, because it is about to be snapshotted into
             * an AP_STACK.
             */
            Sp_adj(-3);
            Sp(2) = stg_ap_v_info;
            Sp(1) = io;
            Sp(0) = stg_enter_info;

            SAVE_THREAD_STATE();
            (r) = ccall maybePerformBlockedException (MyCapability() "ptr",
                                                      CurrentTSO "ptr");

            if (r != 0::CInt) {
                if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
                    jump stg_threadFinished [];
                } else {
                    LOAD_THREAD_STATE();
                    ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
                    R1 = io;
                    jump %ENTRY_CODE(Sp(0)) [R1];
                }
            } else {
                /* we'll just call R1 directly, below */
                Sp_adj(3);
            }
        }

    }
    TICK_UNKNOWN_CALL();
    TICK_SLOW_CALL_fast_v();
    R1 = io;
    jump stg_ap_v_fast [R1];
}

stg_getMaskingStatezh ()
{
    /* args: none */
    /*
       returns: 0 == unmasked,
                1 == masked, non-interruptible,
                2 == masked, interruptible
    */
    return (((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) +
            ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0));
}
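
/* The Haskell side decodes this encoding; a sketch (simplified from
 * base's GHC.IO, which may differ between versions):
 *
 *   getMaskingState :: IO MaskingState
 *   getMaskingState = IO $ \s ->
 *     case getMaskingState# s of
 *       (# s', i #) -> (# s', case i of
 *                               0# -> Unmasked
 *                               1# -> MaskedUninterruptible
 *                               _  -> MaskedInterruptible #)
 */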

stg_killThreadzh (P_ target, P_ exception)
{
    W_ why_blocked;

    /* Needs 3 words because throwToSingleThreaded uses some stack */
    STK_CHK_PP (WDS(3), stg_killThreadzh, target, exception);
    /* We call allocate in throwTo(), so better check for GC */
    MAYBE_GC_PP (stg_killThreadzh, target, exception);

    /*
     * We might have killed ourselves.  In which case, better be *very*
     * careful.  If the exception killed us, then return to the scheduler.
     * If the exception went to a catch frame, we'll just continue from
     * the handler.
     */
    if (target == CurrentTSO) {
        /*
         * So what should happen if a thread calls "throwTo self" inside
         * unsafePerformIO, and later the closure is evaluated by another
         * thread?  Presumably it should behave as if throwTo just returned,
         * and then continue from there.  See #3279, #3288.  This is what
         * happens: on resumption, we will just jump to the next frame on
         * the stack, which is the return point for stg_killThreadzh.
         */
        R1 = target;
        R2 = exception;
        jump stg_killMyself [R1,R2];
    } else {
        W_ msg;

        (msg) = ccall throwTo(MyCapability() "ptr",
                              CurrentTSO "ptr",
                              target "ptr",
                              exception "ptr");

        if (msg == NULL) {
            return ();
        } else {
            StgTSO_why_blocked(CurrentTSO) = BlockedOnMsgThrowTo;
            StgTSO_block_info(CurrentTSO) = msg;
            // we must block, and unlock the message before returning
            jump stg_block_throwto (target, exception);
        }
    }
}
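
/* For reference, the Haskell-level entry points that reach this code;
 * a sketch (simplified from base's GHC.Conc, which may differ between
 * versions):
 *
 *   killThread :: ThreadId -> IO ()
 *   killThread tid = throwTo tid ThreadKilled
 *
 *   throwTo :: Exception e => ThreadId -> e -> IO ()
 *   throwTo (ThreadId tid) ex = IO $ \s ->
 *     case killThread# tid (toException ex) s of s1 -> (# s1, () #)
 */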

/*
 * We must switch into low-level Cmm in order to raise an exception in
 * the current thread, hence this is in a separate proc with arguments
 * passed explicitly in R1 and R2.
 */
stg_killMyself
{
    P_ target, exception;
    target = R1;
    exception = R2;

    SAVE_THREAD_STATE();
    /* ToDo: what if the current thread is masking exceptions? */
    ccall throwToSingleThreaded(MyCapability() "ptr", 
                                target "ptr", exception "ptr");
    if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
        jump stg_threadFinished [];
    } else {
        LOAD_THREAD_STATE();
        ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
        jump %ENTRY_CODE(Sp(0)) [];
    }
}

/* -----------------------------------------------------------------------------
   Catch frames
   -------------------------------------------------------------------------- */

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

#define CATCH_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,exceptions_blocked,handler)   \
  w_ info_ptr,                                                          \
  PROF_HDR_FIELDS(w_,p1,p2)                                             \
  w_ exceptions_blocked,                                                \
  p_ handler


INFO_TABLE_RET(stg_catch_frame, CATCH_FRAME,
               CATCH_FRAME_FIELDS(W_,P_,info_ptr, p1, p2,
                                  exceptions_blocked,handler))
    return (P_ ret)
{
    return (ret);
}

/* -----------------------------------------------------------------------------
 * The catch infotable
 *
 * This should be exactly the same as would be generated by this STG code
 *
 * catch = {x,h} \n {} -> catch#{x,h}
 *
 * It is used in deleteThread when reverting blackholes.
 * -------------------------------------------------------------------------- */

INFO_TABLE(stg_catch,2,0,FUN,"catch","catch")
    (P_ node)
{
    jump stg_catchzh(StgClosure_payload(node,0),StgClosure_payload(node,1));
}

stg_catchzh ( P_ io,      /* :: IO a */
              P_ handler  /* :: Exception -> IO a */ )
{
    W_ exceptions_blocked;

    STK_CHK_GEN();

    exceptions_blocked =
        TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE);
    TICK_CATCHF_PUSHED();

    /* Apply R1 to the realworld token */
    TICK_UNKNOWN_CALL();
    TICK_SLOW_CALL_fast_v();

    jump stg_ap_v_fast
        (CATCH_FRAME_FIELDS(,,stg_catch_frame_info, CCCS, 0,
                            exceptions_blocked, handler))
        (io);
}
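
/* A sketch of the Haskell-level wrapper that pushes this catch frame
 * (simplified from base's GHC.IO; the real definition may differ
 * between versions):
 *
 *   catchException :: Exception e => IO a -> (e -> IO a) -> IO a
 *   catchException (IO io) handler = IO $ catch# io handler'
 *     where handler' e = case fromException e of
 *                          Just e' -> unIO (handler e')
 *                          Nothing -> raiseIO# e
 */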

/* -----------------------------------------------------------------------------
 * The raise infotable
 *
 * This should be exactly the same as would be generated by this STG code
 *
 *   raise = {err} \n {} -> raise#{err}
 *
 * It is used in stg_raisezh to update thunks on the update list
 * -------------------------------------------------------------------------- */

INFO_TABLE(stg_raise,1,0,THUNK_1_0,"raise","raise")
{
    jump stg_raisezh(StgThunk_payload(R1,0));
}

section "data" {
  no_break_on_exception: W_[1];
}

INFO_TABLE_RET(stg_raise_ret, RET_SMALL, W_ info_ptr, P_ exception)
    return (P_ ret)
{
    W_[no_break_on_exception] = 1;
    jump stg_raisezh (exception);
}

stg_raisezh /* explicit stack */
/*
 * args : R1 :: Exception
 *
 * Here we assume that the NativeNodeCall convention always puts the
 * first argument in R1 (which it does).  We cannot use high-level cmm
 * due to all the LOAD_THREAD_STATE()/SAVE_THREAD_STATE() and stack
 * walking that happens in here.
 */
{
    W_ handler;
    W_ frame_type;
    W_ exception;

    exception = R1;

#if defined(PROFILING)
    /* Debugging tool: on raising an exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags_ProfFlags_showCCSOnException(RtsFlags) != 0::I32) {
        SAVE_THREAD_STATE();
        ccall fprintCCS_stderr(CCCS "ptr",
                               exception "ptr",
                               CurrentTSO "ptr");
        LOAD_THREAD_STATE();
    }
#endif

retry_pop_stack:
    SAVE_THREAD_STATE();
    (frame_type) = ccall raiseExceptionHelper(BaseReg "ptr", CurrentTSO "ptr", exception "ptr");
    LOAD_THREAD_STATE();
    if (frame_type == ATOMICALLY_FRAME) {
      /* The exception has reached the edge of a memory transaction.  Check that
       * the transaction is valid.  If not then perhaps the exception should
       * not have been thrown: re-run the transaction.  "trec" will either be
       * a top-level transaction running the atomic block, or a nested
       * transaction running an invariant check.  In the latter case we
       * abort and de-allocate the top-level transaction that encloses it
       * as well (we could just abandon its transaction record, but this makes
       * sure it's marked as aborted and available for re-use). */
      W_ trec, outer;
      W_ r;
      trec = StgTSO_trec(CurrentTSO);
      (r) = ccall stmValidateNestOfTransactions(MyCapability() "ptr", trec "ptr");
      outer = StgTRecHeader_enclosing_trec(trec);
      ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
      ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");

      if (outer != NO_TREC) {
        ccall stmAbortTransaction(MyCapability() "ptr", outer "ptr");
        ccall stmFreeAbortedTRec(MyCapability() "ptr", outer "ptr");
      }

      StgTSO_trec(CurrentTSO) = NO_TREC;
      if (r != 0) {
        // Transaction was valid: continue searching for a catch frame
        Sp = Sp + SIZEOF_StgAtomicallyFrame;
        goto retry_pop_stack;
      } else {
        // Transaction was not valid: we retry the exception (otherwise continue
        // with a further call to raiseExceptionHelper)
        ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr");
        StgTSO_trec(CurrentTSO) = trec;
        R1 = StgAtomicallyFrame_code(Sp);
        jump stg_ap_v_fast [R1];
      }
    }

    // After stripping the stack, see whether we should break here for
    // GHCi (c.f. the -fbreak-on-exception flag).  We do this after
    // stripping the stack for a reason: we'll be inspecting values in
    // GHCi, and it helps if all the thunks under evaluation have
    // already been updated with the exception, rather than being left
    // as blackholes.
    if (W_[no_break_on_exception] != 0) {
        W_[no_break_on_exception] = 0;
    } else {
        if (TO_W_(CInt[rts_stop_on_exception]) != 0) {
            W_ ioAction;
            // we don't want any further exceptions to be caught,
            // until GHCi is ready to handle them.  This prevents
            // deadlock if an exception is raised in InteractiveUI,
            // for example.  Perhaps the stop_on_exception flag should
            // be per-thread.
            CInt[rts_stop_on_exception] = 0;
            ("ptr" ioAction) = ccall deRefStablePtr (W_[rts_breakpoint_io_action] "ptr");
            Sp = Sp - WDS(6);
            Sp(5) = exception;
            Sp(4) = stg_raise_ret_info;
            Sp(3) = exception;             // the AP_STACK
            Sp(2) = ghczmprim_GHCziTypes_True_closure; // dummy breakpoint info
            Sp(1) = ghczmprim_GHCziTypes_True_closure; // True <=> a breakpoint
            R1 = ioAction;
            jump RET_LBL(stg_ap_pppv) [R1];
        }
    }

    if (frame_type == STOP_FRAME) {
        /*
         * We've stripped the entire stack, the thread is now dead.
         * We will leave the stack in a GC'able state, see the stg_stop_thread
         * entry code in StgStartup.cmm.
         */
        W_ stack;
        stack = StgTSO_stackobj(CurrentTSO);
        Sp = stack + OFFSET_StgStack_stack
                + WDS(TO_W_(StgStack_stack_size(stack))) - WDS(2);
        Sp(1) = exception;      /* save the exception */
        Sp(0) = stg_enter_info; /* so that GC can traverse this stack */
        StgTSO_what_next(CurrentTSO) = ThreadKilled::I16;
        SAVE_THREAD_STATE();    /* inline! */

        jump stg_threadFinished [];
    }

    /* Ok, Sp points to the enclosing CATCH_FRAME or CATCH_STM_FRAME.
     * Pop everything down to and including this frame, update Su,
     * push R1, and enter the handler.
     */
    if (frame_type == CATCH_FRAME) {
      handler = StgCatchFrame_handler(Sp);
    } else {
      handler = StgCatchSTMFrame_handler(Sp);
    }

    /* Restore the masked/unmasked state for asynchronous exceptions
     * at the CATCH_FRAME.
     *
     * If exceptions were unmasked, arrange that they are unmasked
     * again after executing the handler by pushing an
     * unmaskAsyncExceptions_ret stack frame.
     *
     * If we've reached an STM catch frame then roll back the nested
     * transaction we were using.
     */
    W_ frame;
    frame = Sp;
    if (frame_type == CATCH_FRAME)
    {
      Sp = Sp + SIZEOF_StgCatchFrame;
      if ((StgCatchFrame_exceptions_blocked(frame) & TSO_BLOCKEX) == 0) {
          Sp_adj(-1);
          Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
      }

      /* Ensure that async exceptions are masked when running the handler.
      */
      StgTSO_flags(CurrentTSO) = %lobits32(
          TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX | TSO_INTERRUPTIBLE);

      /* The interruptible state is inherited from the context of the
       * catch frame, but note that TSO_INTERRUPTIBLE is only meaningful
       * if TSO_BLOCKEX is set.  (we got this wrong earlier, and #4988
       * was a symptom of the bug).
       */
      if ((StgCatchFrame_exceptions_blocked(frame) &
           (TSO_BLOCKEX | TSO_INTERRUPTIBLE)) == TSO_BLOCKEX) {
          StgTSO_flags(CurrentTSO) = %lobits32(
              TO_W_(StgTSO_flags(CurrentTSO)) & ~TSO_INTERRUPTIBLE);
      }
    }
    else /* CATCH_STM_FRAME */
    {
      W_ trec, outer;
      trec = StgTSO_trec(CurrentTSO);
      outer = StgTRecHeader_enclosing_trec(trec);
      ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
      ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchSTMFrame;
    }

    /* Call the handler, passing the exception value and a realworld
     * token as arguments.
     */
    Sp_adj(-1);
    Sp(0) = exception;
    R1 = handler;
    Sp_adj(-1);
    TICK_UNKNOWN_CALL();
    TICK_SLOW_CALL_fast_pv();
    jump RET_LBL(stg_ap_pv) [R1];
}

stg_raiseIOzh (P_ exception)
{
    jump stg_raisezh (exception);
}
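
/* A sketch of the Haskell-level wrappers for these two primitives
 * (simplified from base's GHC.Exception and GHC.IO; may differ
 * between versions):
 *
 *   throw :: Exception e => e -> a
 *   throw e = raise# (toException e)
 *
 *   throwIO :: Exception e => e -> IO a
 *   throwIO e = IO (raiseIO# (toException e))
 *
 * raise# is used for imprecise exceptions from pure code, raiseIO#
 * for the precise IO-ordered variant; both end up in stg_raisezh
 * above.
 */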