Commit 958faee6 authored by ian@well-typed.com

Merge branch 'master' of darcs.haskell.org:/srv/darcs//ghc

parents 53e9916f 4270d7e7
@@ -56,7 +56,7 @@ assignArgumentsPos dflags off conv arg_ty reps = (stk_off, assignments)
       (_, NativeReturn) -> getRegsWithNode dflags
       -- GC calling convention *must* put values in registers
       (_, GC)           -> allRegs dflags
-      (_, Slow)         -> noRegs
+      (_, Slow)         -> nodeOnly
   -- The calling conventions first assign arguments to registers,
   -- then switch to the stack when we first run out of registers
   -- (even if there are still available registers for args of a
@@ -172,8 +172,8 @@ allRegs dflags = (allVanillaRegs dflags,
                   allLongRegs dflags,
                   allSseRegs dflags)

-noRegs :: AvailRegs
-noRegs = ([], [], [], [], [])
+nodeOnly :: AvailRegs
+nodeOnly = ([VanillaReg 1], [], [], [], [])

 globalArgRegs :: DynFlags -> [GlobalReg]
 globalArgRegs dflags = map ($ VGcPtr) (allVanillaRegs dflags) ++
......
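[Editor's note: these two hunks give the Slow calling convention a single vanilla register instead of none, so the function closure itself is passed in R1 like an ordinary argument. Below is a minimal, self-contained sketch of the resulting assignment behaviour once only R1 is available — toy names and types for illustration, not GHC's real CmmCallConv code.]

    -- Toy model of Slow-call argument assignment after this change:
    -- one vanilla register (R1, the node) is available, and every
    -- remaining argument spills to the stack.
    data Loc = InReg Int | OnStack Int deriving Show

    assignSlow :: [String] -> [(String, Loc)]
    assignSlow = go [1] 0               -- [1] plays the role of nodeOnly
      where
        go (r:rs) s (a:as) = (a, InReg r)   : go rs s as
        go []     s (a:as) = (a, OnStack s) : go [] (s + 1) as
        go _      _ []     = []

    main :: IO ()
    main = print (assignSlow ["fun", "x", "y"])
    -- [("fun",InReg 1),("x",OnStack 0),("y",OnStack 1)]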
@@ -1086,7 +1086,7 @@ doJumpWithStack expr_code stk_code args_code = do
   stk_args <- sequence stk_code
   args     <- sequence args_code
   updfr_off <- getUpdFrameOff
-  emit (mkJumpExtra dflags expr args updfr_off stk_args)
+  emit (mkJumpExtra dflags NativeNodeCall expr args updfr_off stk_args)

 doCall :: CmmParse CmmExpr -> [CmmParse LocalReg] -> [CmmParse CmmExpr]
        -> CmmParse ()
......
@@ -9,7 +9,7 @@ module MkGraph
   , stackStubExpr
   , mkNop, mkAssign, mkStore, mkUnsafeCall, mkFinalCall, mkCallReturnsTo
   , mkJumpReturnsTo
-  , mkJump, mkJumpExtra, mkDirectJump, mkForeignJump, mkForeignJumpExtra
+  , mkJump, mkJumpExtra
   , mkRawJump
   , mkCbranch, mkSwitch
   , mkReturn, mkComment, mkCallEntry, mkBranch
@@ -188,10 +188,12 @@ mkStore :: CmmExpr -> CmmExpr -> CmmAGraph
 mkStore l r = mkMiddle $ CmmStore l r

 ---------- Control transfer
-mkJump :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+mkJump :: DynFlags -> Convention -> CmmExpr
+       -> [CmmActual]
+       -> UpdFrameOffset
        -> CmmAGraph
-mkJump dflags e actuals updfr_off =
-  lastWithArgs dflags Jump Old NativeNodeCall actuals updfr_off $
+mkJump dflags conv e actuals updfr_off =
+  lastWithArgs dflags Jump Old conv actuals updfr_off $
     toCall e Nothing updfr_off 0

 -- | A jump where the caller says what the live GlobalRegs are. Used
@@ -203,28 +205,10 @@ mkRawJump dflags e updfr_off vols =
     \arg_space _ -> toCall e Nothing updfr_off 0 arg_space vols

-mkJumpExtra :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
-            -> [CmmActual] -> CmmAGraph
-mkJumpExtra dflags e actuals updfr_off extra_stack =
-  lastWithArgsAndExtraStack dflags Jump Old NativeNodeCall actuals updfr_off extra_stack $
-    toCall e Nothing updfr_off 0
-
-mkDirectJump :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
-             -> CmmAGraph
-mkDirectJump dflags e actuals updfr_off =
-  lastWithArgs dflags Jump Old NativeDirectCall actuals updfr_off $
-    toCall e Nothing updfr_off 0
-
-mkForeignJump :: DynFlags
-              -> Convention -> CmmExpr -> [CmmActual] -> UpdFrameOffset
-              -> CmmAGraph
-mkForeignJump dflags conv e actuals updfr_off =
-  mkForeignJumpExtra dflags conv e actuals updfr_off noExtraStack
-
-mkForeignJumpExtra :: DynFlags -> Convention -> CmmExpr -> [CmmActual]
+mkJumpExtra :: DynFlags -> Convention -> CmmExpr -> [CmmActual]
             -> UpdFrameOffset -> [CmmActual]
             -> CmmAGraph
-mkForeignJumpExtra dflags conv e actuals updfr_off extra_stack =
+mkJumpExtra dflags conv e actuals updfr_off extra_stack =
   lastWithArgsAndExtraStack dflags Jump Old conv actuals updfr_off extra_stack $
     toCall e Nothing updfr_off 0
......
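[Editor's note: with the convention now an explicit parameter of mkJump and mkJumpExtra, the removed combinators were pure specialisations — nothing else distinguished them. A hedged toy model of the collapsed API, using plain strings rather than GHC's real CmmAGraph:]

    -- Toy model: one jump constructor parameterised by convention
    -- subsumes the removed variants.
    data Convention = NativeNodeCall | NativeDirectCall | Slow
      deriving Show

    mkJump' :: Convention -> String -> [String] -> String
    mkJump' conv target args =
      "jump " ++ target ++ " " ++ show args ++ " via " ++ show conv

    -- old mkJump             ==>  mkJump' NativeNodeCall
    -- old mkDirectJump       ==>  mkJump' NativeDirectCall
    -- old mkForeignJump conv ==>  mkJump' conv

    main :: IO ()
    main = putStrLn (mkJump' NativeDirectCall "fast_lbl" ["node", "arg1"])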
@@ -450,7 +450,7 @@ closureCodeBody top_lvl bndr cl_info cc args arity body fv_details
         ; emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl args $
             \(_offset, node, arg_regs) -> do
                 -- Emit slow-entry code (for entering a closure through a PAP)
-                { mkSlowEntryCode cl_info arg_regs
+                { mkSlowEntryCode bndr cl_info arg_regs
                 ; dflags <- getDynFlags
                 ; let lf_info = closureLFInfo cl_info
@@ -494,21 +494,22 @@ load_fvs node lf_info = mapM_ (\ (reg, off) ->
 --
 -- The slow entry point is used for unknown calls: eg. stg_PAP_entry

-mkSlowEntryCode :: ClosureInfo -> [LocalReg] -> FCode ()
+mkSlowEntryCode :: Id -> ClosureInfo -> [LocalReg] -> FCode ()
 -- If this function doesn't have a specialised ArgDescr, we need
 -- to generate the function's arg bitmap and slow-entry code.
 -- Here, we emit the slow-entry code.
-mkSlowEntryCode cl_info arg_regs -- function closure is already in `Node'
+mkSlowEntryCode bndr cl_info arg_regs -- function closure is already in `Node'
  | Just (_, ArgGen _) <- closureFunInfo cl_info
  = do dflags <- getDynFlags
-      let slow_lbl = closureSlowEntryLabel cl_info
+      let node = idToReg dflags (NonVoid bndr)
+          slow_lbl = closureSlowEntryLabel cl_info
           fast_lbl = closureLocalEntryLabel dflags cl_info
-          -- mkDirectJump does not clobber `Node' containing function closure
-          jump = mkDirectJump dflags
-                              (mkLblExpr fast_lbl)
-                              (map (CmmReg . CmmLocal) arg_regs)
-                              (initUpdFrameOff dflags)
-      emitProcWithConvention Slow Nothing slow_lbl arg_regs jump
+          jump = mkJump dflags NativeNodeCall
+                        (mkLblExpr fast_lbl)
+                        (map (CmmReg . CmmLocal) (node : arg_regs))
+                        (initUpdFrameOff dflags)
+      emitProcWithConvention Slow Nothing slow_lbl (node : arg_regs) jump
 | otherwise = return ()

 -----------------------------------------
@@ -728,7 +729,7 @@ link_caf node _is_upd = do
         -- assuming lots of things, like the stack pointer hasn't
         -- moved since we entered the CAF.
        (let target = entryCode dflags (closureInfoPtr dflags (CmmReg (CmmLocal node))) in
-        mkJump dflags target [] updfr)
+        mkJump dflags NativeNodeCall target [] updfr)

   ; return hp_rel }
......
@@ -685,8 +685,8 @@ emitEnter fun = do
            -- test, just generating an enter.
            Return _ -> do
              { let entry = entryCode dflags $ closureInfoPtr dflags $ CmmReg nodeReg
-             ; emit $ mkForeignJump dflags NativeNodeCall entry
-                          [cmmUntag dflags fun] updfr_off
+             ; emit $ mkJump dflags NativeNodeCall entry
+                          [cmmUntag dflags fun] updfr_off
              ; return AssignedDirectly
              }
......
@@ -366,21 +366,17 @@ entryHeapCheck' is_fastf node arity args code

           Function (fast): call (NativeNode) stg_gc_fun(fun, args)

-          Function (slow): R1 = fun
-                           call (slow) stg_gc_fun(args)
-
-          XXX: this is a bit naughty, we should really pass R1 as an
-          argument and use a special calling convention.
+          Function (slow): call (slow) stg_gc_fun(fun, args)
        -}
        gc_call upd
            | is_thunk
-            = mkJump dflags stg_gc_enter1 [node] upd
+            = mkJump dflags NativeNodeCall stg_gc_enter1 [node] upd

            | is_fastf
-            = mkJump dflags stg_gc_fun (node : args') upd
+            = mkJump dflags NativeNodeCall stg_gc_fun (node : args') upd

            | otherwise
-            = mkAssign nodeReg node <*>
-              mkForeignJump dflags Slow stg_gc_fun args' upd
+            = mkJump dflags Slow stg_gc_fun (node : args') upd

        updfr_sz <- getUpdFrameOff
......
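[Editor's note: the heap-check hunk above and the nodeOnly change in the first file work together. Once Slow calls have R1 in their register pool, passing the closure as the first argument in node : args' lets the ordinary argument-assignment machinery place it in R1, which is what the removed mkAssign nodeReg node did by hand. In terms of the toy assignSlow sketch after the first hunk, assignSlow ("fun" : args) puts "fun" in R1 and the remaining arguments on the stack — the layout the slow-call path expects.]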
@@ -121,7 +121,7 @@ emitCallWithExtraStack (callConv, retConv) fun args extra_stack
  ; updfr_off <- getUpdFrameOff
  ; case sequel of
      Return _ -> do
-       emit $ mkForeignJumpExtra dflags callConv fun args updfr_off extra_stack
+       emit $ mkJumpExtra dflags callConv fun args updfr_off extra_stack
        return AssignedDirectly
      AssignTo res_regs _ -> do
        k <- newLabelC
......
@@ -885,9 +885,9 @@ INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
 stg_atomicallyzh (P_ stm)
 {
-  W_ old_trec;
-  W_ new_trec;
-  W_ code, next_invariant, frame_result;
+  P_ old_trec;
+  P_ new_trec;
+  P_ code, next_invariant, frame_result;

   // stmStartTransaction may allocate
   MAYBE_GC_P(stg_atomicallyzh, stm);
......
@@ -24,10 +24,11 @@
  * to the scheduler marking the thread as finished.
  */
-#define CHECK_SENSIBLE_REGS() \
-    ASSERT(Hp != 0);          \
-    ASSERT(Sp != 0);          \
-    ASSERT(SpLim != 0);       \
+#define CHECK_SENSIBLE_REGS() \
+    ASSERT(Hp != 0);          \
+    ASSERT(HpAlloc == 0);     \
+    ASSERT(Sp != 0);          \
+    ASSERT(SpLim != 0);       \
     ASSERT(SpLim - WDS(RESERVED_STACK_WORDS) <= Sp);

 /* -----------------------------------------------------------------------------
......
@@ -29,6 +29,8 @@ INFO_TABLE_RET ( stg_upd_frame, UPDATE_FRAME,
                 UPDATE_FRAME_FIELDS(W_,P_,info_ptr,_ccs,_unused,updatee) )
     return (P_ ret) /* the closure being returned */
 {
+    ASSERT(HpAlloc == 0); // Note [HpAlloc]
+
     /* ToDo: it might be a PAP, so we should check... */
     TICK_UPD_CON_IN_NEW(sizeW_fromITBL(%GET_STD_INFO(updatee)));
@@ -45,7 +47,9 @@ INFO_TABLE_RET ( stg_marked_upd_frame, UPDATE_FRAME,
                 UPDATE_FRAME_FIELDS(W_,P_,info_ptr,_ccs,_unused,updatee) )
     return (P_ ret) /* the closure being returned */
 {
-    W_ v, i, tso, link;
+    W_ v;
+
+    ASSERT(HpAlloc == 0); // Note [HpAlloc]

     // we know the closure is a BLACKHOLE
     v = StgInd_indirectee(updatee);
@@ -85,3 +89,15 @@ INFO_TABLE_RET ( stg_bh_upd_frame, UPDATE_FRAME,
                  ( UPDATE_FRAME_FIELDS(,,info_ptr,ccs,_unused,updatee) )
                  (ret);
 }
+
+/* Note [HpAlloc]
+ *
+ * HpAlloc is required to be zero unless we just bumped Hp and failed
+ * the heap check: see HeapStackCheck.cmm.  Failures that result from
+ * HpAlloc being non-zero are very hard to track down, because they
+ * manifest as spurious heap corruption that happens only with +RTS
+ * -N2 or greater (because then we have a lot more
+ * interruptCapability() calls happening).  Hence, we assert
+ * HpAlloc==0 as often as possible, and the update code is a good
+ * place to do that.
+ */
@@ -499,6 +499,9 @@ checkSTACK (StgStack *stack)
 void
 checkTSO(StgTSO *tso)
 {
+    StgTSO *next;
+    const StgInfoTable *info;
+
     if (tso->what_next == ThreadKilled) {
       /* The garbage collector doesn't bother following any pointers
        * from dead threads, so don't check sanity here.
@@ -506,9 +509,13 @@ checkTSO(StgTSO *tso)
         return;
     }

-    ASSERT(tso->_link == END_TSO_QUEUE ||
-           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
-           tso->_link->header.info == &stg_TSO_info);
+    next = tso->_link;
+    info = (const StgInfoTable*) tso->_link->header.info;
+
+    ASSERT(next == END_TSO_QUEUE ||
+           info == &stg_MVAR_TSO_QUEUE_info ||
+           info == &stg_TSO_info ||
+           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

     if (   tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
......