Commit d70b19bf authored by Simon Marlow's avatar Simon Marlow

Per-thread allocation counters and limits

This reverts commit f0fcc41d.

New changes: now works on 32-bit platforms too.  I added some basic
support for 64-bit subtraction and comparison operations to the x86
NCG.
parent c774b28f
...@@ -992,9 +992,12 @@ lowerSafeForeignCall dflags block ...@@ -992,9 +992,12 @@ lowerSafeForeignCall dflags block
id <- newTemp (bWord dflags) id <- newTemp (bWord dflags)
new_base <- newTemp (cmmRegType dflags (CmmGlobal BaseReg)) new_base <- newTemp (cmmRegType dflags (CmmGlobal BaseReg))
let (caller_save, caller_load) = callerSaveVolatileRegs dflags let (caller_save, caller_load) = callerSaveVolatileRegs dflags
load_tso <- newTemp (gcWord dflags)
load_stack <- newTemp (gcWord dflags) load_stack <- newTemp (gcWord dflags)
let suspend = saveThreadState dflags <*> tso <- newTemp (gcWord dflags)
cn <- newTemp (bWord dflags)
bdfree <- newTemp (bWord dflags)
bdstart <- newTemp (bWord dflags)
let suspend = saveThreadState dflags tso cn <*>
caller_save <*> caller_save <*>
mkMiddle (callSuspendThread dflags id intrbl) mkMiddle (callSuspendThread dflags id intrbl)
midCall = mkUnsafeCall tgt res args midCall = mkUnsafeCall tgt res args
...@@ -1003,7 +1006,7 @@ lowerSafeForeignCall dflags block ...@@ -1003,7 +1006,7 @@ lowerSafeForeignCall dflags block
-- might now have a different Capability! -- might now have a different Capability!
mkAssign (CmmGlobal BaseReg) (CmmReg (CmmLocal new_base)) <*> mkAssign (CmmGlobal BaseReg) (CmmReg (CmmLocal new_base)) <*>
caller_load <*> caller_load <*>
loadThreadState dflags load_tso load_stack loadThreadState dflags tso load_stack cn bdfree bdstart
(_, regs, copyout) = (_, regs, copyout) =
copyOutOflow dflags NativeReturn Jump (Young succ) copyOutOflow dflags NativeReturn Jump (Young succ)
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
module CmmMachOp module CmmMachOp
( MachOp(..) ( MachOp(..)
, pprMachOp, isCommutableMachOp, isAssociativeMachOp , pprMachOp, isCommutableMachOp, isAssociativeMachOp
, isComparisonMachOp, machOpResultType , isComparisonMachOp, maybeIntComparison, machOpResultType
, machOpArgReps, maybeInvertComparison , machOpArgReps, maybeInvertComparison
-- MachOp builders -- MachOp builders
...@@ -11,9 +11,11 @@ module CmmMachOp ...@@ -11,9 +11,11 @@ module CmmMachOp
, mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem , mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem
, mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe , mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe
, mo_wordULe, mo_wordUGt, mo_wordULt , mo_wordULe, mo_wordUGt, mo_wordULt
, mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot, mo_wordShl, mo_wordSShr, mo_wordUShr , mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot
, mo_wordShl, mo_wordSShr, mo_wordUShr
, mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32 , mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32
, mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord, mo_u_32ToWord, mo_s_32ToWord , mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord
, mo_u_32ToWord, mo_s_32ToWord
, mo_32To8, mo_32To16, mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64 , mo_32To8, mo_32To16, mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64
-- CallishMachOp -- CallishMachOp
...@@ -260,6 +262,7 @@ isAssociativeMachOp mop = ...@@ -260,6 +262,7 @@ isAssociativeMachOp mop =
MO_Xor {} -> True MO_Xor {} -> True
_other -> False _other -> False
-- ---------------------------------------------------------------------------- -- ----------------------------------------------------------------------------
-- isComparisonMachOp -- isComparisonMachOp
...@@ -290,6 +293,25 @@ isComparisonMachOp mop = ...@@ -290,6 +293,25 @@ isComparisonMachOp mop =
MO_F_Lt {} -> True MO_F_Lt {} -> True
_other -> False _other -> False
{- |
Returns @Just w@ if the operation is an integer comparison with width
@w@, or @Nothing@ otherwise.
-}
maybeIntComparison :: MachOp -> Maybe Width
maybeIntComparison (MO_Eq w)   = Just w
maybeIntComparison (MO_Ne w)   = Just w
maybeIntComparison (MO_S_Ge w) = Just w
maybeIntComparison (MO_S_Le w) = Just w
maybeIntComparison (MO_S_Gt w) = Just w
maybeIntComparison (MO_S_Lt w) = Just w
maybeIntComparison (MO_U_Ge w) = Just w
maybeIntComparison (MO_U_Le w) = Just w
maybeIntComparison (MO_U_Gt w) = Just w
maybeIntComparison (MO_U_Lt w) = Just w
maybeIntComparison _           = Nothing
-- ----------------------------------------------------------------------------- -- -----------------------------------------------------------------------------
-- Inverting conditions -- Inverting conditions
......
This diff is collapsed.
...@@ -391,6 +391,21 @@ iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do ...@@ -391,6 +391,21 @@ iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
ADC II32 (OpReg r2hi) (OpReg rhi) ] ADC II32 (OpReg r2hi) (OpReg rhi) ]
return (ChildCode64 code rlo) return (ChildCode64 code rlo)
-- 64-bit subtraction on a 32-bit platform: subtract the low words
-- first, then combine the high words with the borrow using SBB.
iselExpr64 (CmmMachOp (MO_Sub _) [e1,e2]) = do
   ChildCode64 codeL lhsLo <- iselExpr64 e1
   ChildCode64 codeR rhsLo <- iselExpr64 e2
   (dstLo, dstHi) <- getNewRegPairNat II32
   let lhsHi  = getHiVRegFromLo lhsLo
       rhsHi  = getHiVRegFromLo rhsLo
       -- copy the left operand into the destination pair, then
       -- subtract the right operand in place; SUB sets the carry
       -- (borrow) flag, which the following SBB consumes so the
       -- borrow propagates into the high word
       instrs = toOL [ MOV II32 (OpReg lhsLo) (OpReg dstLo)
                     , SUB II32 (OpReg rhsLo) (OpReg dstLo)
                     , MOV II32 (OpReg lhsHi) (OpReg dstHi)
                     , SBB II32 (OpReg rhsHi) (OpReg dstHi)
                     ]
   return (ChildCode64 (codeL `appOL` codeR `appOL` instrs) dstLo)
iselExpr64 (CmmMachOp (MO_UU_Conv _ W64) [expr]) = do iselExpr64 (CmmMachOp (MO_UU_Conv _ W64) [expr]) = do
fn <- getAnyReg expr fn <- getAnyReg expr
r_dst_lo <- getNewRegNat II32 r_dst_lo <- getNewRegNat II32
...@@ -1272,24 +1287,23 @@ getCondCode (CmmMachOp mop [x, y]) ...@@ -1272,24 +1287,23 @@ getCondCode (CmmMachOp mop [x, y])
MO_F_Lt W64 -> condFltCode LTT x y MO_F_Lt W64 -> condFltCode LTT x y
MO_F_Le W64 -> condFltCode LE x y MO_F_Le W64 -> condFltCode LE x y
MO_Eq _ -> condIntCode EQQ x y _ -> condIntCode (machOpToCond mop) x y
MO_Ne _ -> condIntCode NE x y
MO_S_Gt _ -> condIntCode GTT x y
MO_S_Ge _ -> condIntCode GE x y
MO_S_Lt _ -> condIntCode LTT x y
MO_S_Le _ -> condIntCode LE x y
MO_U_Gt _ -> condIntCode GU x y
MO_U_Ge _ -> condIntCode GEU x y
MO_U_Lt _ -> condIntCode LU x y
MO_U_Le _ -> condIntCode LEU x y
_other -> pprPanic "getCondCode(x86,x86_64)" (ppr (CmmMachOp mop [x,y]))
getCondCode other = pprPanic "getCondCode(2)(x86,x86_64)" (ppr other) getCondCode other = pprPanic "getCondCode(2)(x86,x86_64)" (ppr other)
-- | Map an integer comparison 'MachOp' onto the corresponding x86
-- condition code; panics if the op is not an integer comparison.
machOpToCond :: MachOp -> Cond
machOpToCond (MO_Eq _)   = EQQ
machOpToCond (MO_Ne _)   = NE
machOpToCond (MO_S_Gt _) = GTT
machOpToCond (MO_S_Ge _) = GE
machOpToCond (MO_S_Lt _) = LTT
machOpToCond (MO_S_Le _) = LE
machOpToCond (MO_U_Gt _) = GU
machOpToCond (MO_U_Ge _) = GEU
machOpToCond (MO_U_Lt _) = LU
machOpToCond (MO_U_Le _) = LEU
machOpToCond mo          = pprPanic "machOpToCond" (pprMachOp mo)
-- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be -- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be
...@@ -1538,7 +1552,31 @@ genCondJump ...@@ -1538,7 +1552,31 @@ genCondJump
-> CmmExpr -- the condition on which to branch -> CmmExpr -- the condition on which to branch
-> NatM InstrBlock -> NatM InstrBlock
genCondJump id bool = do genCondJump id expr = do
is32Bit <- is32BitPlatform
genCondJump' is32Bit id expr
genCondJump' :: Bool -> BlockId -> CmmExpr -> NatM InstrBlock

-- 64-bit integer comparisons on 32-bit platforms, done with a pair of
-- 32-bit compares.
--
-- If the high words differ they alone decide the result, so the
-- high-word jumps must use the *strict* form of the condition
-- (e.g. >= becomes >): equality of the high words decides nothing.
-- If the high words are equal, the result is decided by the low
-- words, which must be compared *unsigned* regardless of the
-- signedness of the 64-bit comparison, because only the topmost bit
-- of a 64-bit value is a sign bit.  Equality tests have no strict
-- form and are handled separately: both halves must match (EQ), or
-- either half may differ (NE).
genCondJump' is32Bit true (CmmMachOp mop [e1,e2])
  | is32Bit, Just W64 <- maybeIntComparison mop = do
  ChildCode64 code1 r1_lo <- iselExpr64 e1
  ChildCode64 code2 r2_lo <- iselExpr64 e2
  false <- getBlockIdNat
  let r1_hi = getHiVRegFromLo r1_lo
      r2_hi = getHiVRegFromLo r2_lo
      cond  = machOpToCond mop
      Just cond' = maybeFlipCond cond   -- cond with the operands swapped
      cmp_hi = CMP II32 (OpReg r2_hi) (OpReg r1_hi)
      cmp_lo = CMP II32 (OpReg r2_lo) (OpReg r1_lo)
      jumps = case cond of
        -- equal iff both halves are equal
        EQQ -> [ cmp_hi, JXX NE false,
                 cmp_lo, JXX EQQ true ]
        -- unequal iff either half is unequal
        NE  -> [ cmp_hi, JXX NE true,
                 cmp_lo, JXX NE true ]
        -- ordered comparisons: the high words decide only if they
        -- differ; otherwise the (unsigned) low words decide
        _   -> [ cmp_hi, JXX (strictCond cond)  true,
                         JXX (strictCond cond') false,
                 cmp_lo, JXX (unsignedLoCond cond) true ]
  return $ code1 `appOL` code2 `appOL`
           toOL (jumps ++ [ NEWBLOCK false ])
  where
    -- the strict version of an ordered condition, for the high words
    strictCond GE  = GTT
    strictCond LE  = LTT
    strictCond GEU = GU
    strictCond LEU = LU
    strictCond c   = c
    -- the unsigned version of an ordered condition, for the low words
    unsignedLoCond GE  = GEU
    unsignedLoCond GTT = GU
    unsignedLoCond LE  = LEU
    unsignedLoCond LTT = LU
    unsignedLoCond c   = c
genCondJump' _ id bool = do
CondCode is_float cond cond_code <- getCondCode bool CondCode is_float cond cond_code <- getCondCode bool
use_sse2 <- sse2Enabled use_sse2 <- sse2Enabled
if not is_float || not use_sse2 if not is_float || not use_sse2
...@@ -1569,7 +1607,6 @@ genCondJump id bool = do ...@@ -1569,7 +1607,6 @@ genCondJump id bool = do
] ]
return (cond_code `appOL` code) return (cond_code `appOL` code)
-- ----------------------------------------------------------------------------- -- -----------------------------------------------------------------------------
-- Generating C calls -- Generating C calls
......
...@@ -196,6 +196,7 @@ data Instr ...@@ -196,6 +196,7 @@ data Instr
| ADD Size Operand Operand | ADD Size Operand Operand
| ADC Size Operand Operand | ADC Size Operand Operand
| SUB Size Operand Operand | SUB Size Operand Operand
| SBB Size Operand Operand
| MUL Size Operand Operand | MUL Size Operand Operand
| MUL2 Size Operand -- %edx:%eax = operand * %rax | MUL2 Size Operand -- %edx:%eax = operand * %rax
...@@ -365,6 +366,7 @@ x86_regUsageOfInstr platform instr ...@@ -365,6 +366,7 @@ x86_regUsageOfInstr platform instr
ADD _ src dst -> usageRM src dst ADD _ src dst -> usageRM src dst
ADC _ src dst -> usageRM src dst ADC _ src dst -> usageRM src dst
SUB _ src dst -> usageRM src dst SUB _ src dst -> usageRM src dst
SBB _ src dst -> usageRM src dst
IMUL _ src dst -> usageRM src dst IMUL _ src dst -> usageRM src dst
IMUL2 _ src -> mkRU (eax:use_R src []) [eax,edx] IMUL2 _ src -> mkRU (eax:use_R src []) [eax,edx]
MUL _ src dst -> usageRM src dst MUL _ src dst -> usageRM src dst
...@@ -543,6 +545,7 @@ x86_patchRegsOfInstr instr env ...@@ -543,6 +545,7 @@ x86_patchRegsOfInstr instr env
ADD sz src dst -> patch2 (ADD sz) src dst ADD sz src dst -> patch2 (ADD sz) src dst
ADC sz src dst -> patch2 (ADC sz) src dst ADC sz src dst -> patch2 (ADC sz) src dst
SUB sz src dst -> patch2 (SUB sz) src dst SUB sz src dst -> patch2 (SUB sz) src dst
SBB sz src dst -> patch2 (SBB sz) src dst
IMUL sz src dst -> patch2 (IMUL sz) src dst IMUL sz src dst -> patch2 (IMUL sz) src dst
IMUL2 sz src -> patch1 (IMUL2 sz) src IMUL2 sz src -> patch1 (IMUL2 sz) src
MUL sz src dst -> patch2 (MUL sz) src dst MUL sz src dst -> patch2 (MUL sz) src dst
......
...@@ -570,11 +570,10 @@ pprInstr (ADD size (OpImm (ImmInt (-1))) dst) ...@@ -570,11 +570,10 @@ pprInstr (ADD size (OpImm (ImmInt (-1))) dst)
= pprSizeOp (sLit "dec") size dst = pprSizeOp (sLit "dec") size dst
pprInstr (ADD size (OpImm (ImmInt 1)) dst) pprInstr (ADD size (OpImm (ImmInt 1)) dst)
= pprSizeOp (sLit "inc") size dst = pprSizeOp (sLit "inc") size dst
pprInstr (ADD size src dst) pprInstr (ADD size src dst) = pprSizeOpOp (sLit "add") size src dst
= pprSizeOpOp (sLit "add") size src dst pprInstr (ADC size src dst) = pprSizeOpOp (sLit "adc") size src dst
pprInstr (ADC size src dst)
= pprSizeOpOp (sLit "adc") size src dst
pprInstr (SUB size src dst) = pprSizeOpOp (sLit "sub") size src dst pprInstr (SUB size src dst) = pprSizeOpOp (sLit "sub") size src dst
pprInstr (SBB size src dst) = pprSizeOpOp (sLit "sbb") size src dst
pprInstr (IMUL size op1 op2) = pprSizeOpOp (sLit "imul") size op1 op2 pprInstr (IMUL size op1 op2) = pprSizeOpOp (sLit "imul") size op1 op2
pprInstr (ADD_CC size src dst) pprInstr (ADD_CC size src dst)
......
...@@ -276,6 +276,12 @@ ...@@ -276,6 +276,12 @@
*/ */
#define TSO_SQUEEZED 128 #define TSO_SQUEEZED 128
/*
* Enables the AllocationLimitExceeded exception when the thread's
* allocation limit goes negative.
*/
#define TSO_ALLOC_LIMIT 256
/* /*
* The number of times we spin in a spin lock before yielding (see * The number of times we spin in a spin lock before yielding (see
* #3758). To tune this value, use the benchmark in #3758: run the * #3758). To tune this value, use the benchmark in #3758: run the
......
...@@ -56,6 +56,14 @@ struct GC_FLAGS { ...@@ -56,6 +56,14 @@ struct GC_FLAGS {
rtsBool doIdleGC; rtsBool doIdleGC;
StgWord heapBase; /* address to ask the OS for memory */ StgWord heapBase; /* address to ask the OS for memory */
StgWord allocLimitGrace; /* units: *blocks*
* After an AllocationLimitExceeded
* exception has been raised, how much
* extra space is given to the thread
* to handle the exception before we
* raise it again.
*/
}; };
struct DEBUG_FLAGS { struct DEBUG_FLAGS {
......
...@@ -42,8 +42,12 @@ StgRegTable * resumeThread (void *); ...@@ -42,8 +42,12 @@ StgRegTable * resumeThread (void *);
// //
// Thread operations from Threads.c // Thread operations from Threads.c
// //
int cmp_thread (StgPtr tso1, StgPtr tso2); int cmp_thread (StgPtr tso1, StgPtr tso2);
int rts_getThreadId (StgPtr tso); int rts_getThreadId (StgPtr tso);
HsInt64 rts_getThreadAllocationCounter (StgPtr tso);
void rts_setThreadAllocationCounter (StgPtr tso, HsInt64 i);
void rts_enableThreadAllocationLimit (StgPtr tso);
void rts_disableThreadAllocationLimit (StgPtr tso);
#if !defined(mingw32_HOST_OS) #if !defined(mingw32_HOST_OS)
pid_t forkProcess (HsStablePtr *entry); pid_t forkProcess (HsStablePtr *entry);
......
...@@ -145,15 +145,18 @@ typedef struct StgTSO_ { ...@@ -145,15 +145,18 @@ typedef struct StgTSO_ {
*/ */
struct StgBlockingQueue_ *bq; struct StgBlockingQueue_ *bq;
#ifdef TICKY_TICKY /*
/* TICKY-specific stuff would go here. */ * The allocation limit for this thread, which is updated as the
#endif * thread allocates. If the value drops below zero, and
#ifdef PROFILING * TSO_ALLOC_LIMIT is set in flags, we raise an exception in the
StgTSOProfInfo prof; * thread, and give the thread a little more space to handle the
#endif * exception before we raise the exception again.
#ifdef mingw32_HOST_OS *
StgWord32 saved_winerror; * This is an integer, because we might update it in a place where
#endif * it isn't convenient to raise the exception, so we want it to
* stay negative until we get around to checking it.
*/
StgInt64 alloc_limit; /* in bytes */
/* /*
* sum of the sizes of all stack chunks (in words), used to decide * sum of the sizes of all stack chunks (in words), used to decide
...@@ -168,6 +171,16 @@ typedef struct StgTSO_ { ...@@ -168,6 +171,16 @@ typedef struct StgTSO_ {
*/ */
StgWord32 tot_stack_size; StgWord32 tot_stack_size;
#ifdef TICKY_TICKY
/* TICKY-specific stuff would go here. */
#endif
#ifdef PROFILING
StgTSOProfInfo prof;
#endif
#ifdef mingw32_HOST_OS
StgWord32 saved_winerror;
#endif
} *StgTSOPtr; } *StgTSOPtr;
typedef struct StgStack_ { typedef struct StgStack_ {
......
...@@ -48,6 +48,7 @@ module Control.Exception ( ...@@ -48,6 +48,7 @@ module Control.Exception (
NestedAtomically(..), NestedAtomically(..),
BlockedIndefinitelyOnMVar(..), BlockedIndefinitelyOnMVar(..),
BlockedIndefinitelyOnSTM(..), BlockedIndefinitelyOnSTM(..),
AllocationLimitExceeded(..),
Deadlock(..), Deadlock(..),
NoMethodError(..), NoMethodError(..),
PatternMatchFail(..), PatternMatchFail(..),
......
...@@ -31,6 +31,7 @@ module Control.Exception.Base ( ...@@ -31,6 +31,7 @@ module Control.Exception.Base (
NestedAtomically(..), NestedAtomically(..),
BlockedIndefinitelyOnMVar(..), BlockedIndefinitelyOnMVar(..),
BlockedIndefinitelyOnSTM(..), BlockedIndefinitelyOnSTM(..),
AllocationLimitExceeded(..),
Deadlock(..), Deadlock(..),
NoMethodError(..), NoMethodError(..),
PatternMatchFail(..), PatternMatchFail(..),
......
...@@ -59,6 +59,12 @@ module GHC.Conc ...@@ -59,6 +59,12 @@ module GHC.Conc
, threadWaitWriteSTM , threadWaitWriteSTM
, closeFdWith , closeFdWith
-- * Allocation counter and limit
, setAllocationCounter
, getAllocationCounter
, enableAllocationLimit
, disableAllocationLimit
-- * TVars -- * TVars
, STM(..) , STM(..)
, atomically , atomically
......
...@@ -60,6 +60,12 @@ module GHC.Conc.Sync ...@@ -60,6 +60,12 @@ module GHC.Conc.Sync
, threadStatus , threadStatus
, threadCapability , threadCapability
-- * Allocation counter and quota
, setAllocationCounter
, getAllocationCounter
, enableAllocationLimit
, disableAllocationLimit
-- * TVars -- * TVars
, STM(..) , STM(..)
, atomically , atomically
...@@ -171,16 +177,92 @@ instance Eq ThreadId where ...@@ -171,16 +177,92 @@ instance Eq ThreadId where
instance Ord ThreadId where instance Ord ThreadId where
compare = cmpThread compare = cmpThread
-- | Every thread has an allocation counter that tracks how much
-- memory has been allocated by the thread.  The counter is
-- initialized to zero, and 'setAllocationCounter' sets the current
-- value.  The allocation counter counts *down*, so in the absence of
-- a call to 'setAllocationCounter' its value is the negation of the
-- number of bytes of memory allocated by the thread.
--
-- There are two things that you can do with this counter:
--
-- * Use it as a simple profiling mechanism, with
-- 'getAllocationCounter'.
--
-- * Use it as a resource limit.  See 'enableAllocationLimit'.
--
-- Allocation accounting is accurate only to about 4Kbytes.
--
setAllocationCounter :: Int64 -> IO ()
setAllocationCounter n =
  myThreadId >>= \(ThreadId t) -> rts_setThreadAllocationCounter t n
-- | Return the current value of the allocation counter for the
-- current thread.
getAllocationCounter :: IO Int64
getAllocationCounter =
  myThreadId >>= \(ThreadId t) -> rts_getThreadAllocationCounter t
-- | Enables the allocation counter to be treated as a limit for the
-- current thread.  When the allocation limit is enabled, if the
-- allocation counter counts down below zero, the thread will be sent
-- the 'AllocationLimitExceeded' asynchronous exception.  When this
-- happens, the counter is reinitialised (by default
-- to 100K, but tunable with the @+RTS -xq@ option) so that it can handle
-- the exception and perform any necessary clean up.  If it exhausts
-- this additional allowance, another 'AllocationLimitExceeded' exception
-- is sent, and so forth.
--
-- Note that memory allocation is unrelated to /live memory/, also
-- known as /heap residency/.  A thread can allocate a large amount of
-- memory and retain anything between none and all of it.  It is
-- better to think of the allocation limit as a limit on
-- /CPU time/, rather than a limit on memory.
--
-- Compared to using timeouts, allocation limits don't count time
-- spent blocked or in foreign calls.
--
enableAllocationLimit :: IO ()
enableAllocationLimit =
  myThreadId >>= \(ThreadId t) -> rts_enableThreadAllocationLimit t
-- | Disable allocation limit processing for the current thread.
disableAllocationLimit :: IO ()
disableAllocationLimit =
  myThreadId >>= \(ThreadId t) -> rts_disableThreadAllocationLimit t
-- We cannot do these operations safely on another thread, because on
-- a 32-bit machine we cannot do atomic operations on a 64-bit value.
-- Therefore, we only expose APIs that allow getting and setting the
-- limit of the current thread.
foreign import ccall unsafe "rts_setThreadAllocationCounter"
rts_setThreadAllocationCounter :: ThreadId# -> Int64 -> IO ()
foreign import ccall unsafe "rts_getThreadAllocationCounter"
rts_getThreadAllocationCounter :: ThreadId# -> IO Int64
foreign import ccall unsafe "rts_enableThreadAllocationLimit"
rts_enableThreadAllocationLimit :: ThreadId# -> IO ()
foreign import ccall unsafe "rts_disableThreadAllocationLimit"
rts_disableThreadAllocationLimit :: ThreadId# -> IO ()
{- | {- |
Sparks off a new thread to run the 'IO' computation passed as the Creates a new thread to run the 'IO' computation passed as the
first argument, and returns the 'ThreadId' of the newly created first argument, and returns the 'ThreadId' of the newly created
thread. thread.
The new thread will be a lightweight thread; if you want to use a foreign The new thread will be a lightweight, /unbound/ thread. Foreign calls
library that uses thread-local storage, use 'Control.Concurrent.forkOS' instead. made by this thread are not guaranteed to be made by any particular OS
thread; if you need foreign calls to be made by a particular OS
thread, then use 'Control.Concurrent.forkOS' instead.
GHC note: the new thread inherits the /masked/ state of the parent The new thread inherits the /masked/ state of the parent (see
(see 'Control.Exception.mask'). 'Control.Exception.mask').
The newly created thread has an exception handler that discards the The newly created thread has an exception handler that discards the
exceptions 'BlockedIndefinitelyOnMVar', 'BlockedIndefinitelyOnSTM', and exceptions 'BlockedIndefinitelyOnMVar', 'BlockedIndefinitelyOnSTM', and
......
...@@ -22,6 +22,7 @@ module GHC.IO.Exception ( ...@@ -22,6 +22,7 @@ module GHC.IO.Exception (
BlockedIndefinitelyOnMVar(..), blockedIndefinitelyOnMVar, BlockedIndefinitelyOnMVar(..), blockedIndefinitelyOnMVar,
BlockedIndefinitelyOnSTM(..), blockedIndefinitelyOnSTM, BlockedIndefinitelyOnSTM(..), blockedIndefinitelyOnSTM,
Deadlock(..), Deadlock(..),
AllocationLimitExceeded(..), allocationLimitExceeded,
AssertionFailed(..), AssertionFailed(..),
SomeAsyncException(..), SomeAsyncException(..),
...@@ -98,6 +99,23 @@ instance Show Deadlock where ...@@ -98,6 +99,23 @@ instance Show Deadlock where
----- -----
-- |This thread has exceeded its allocation limit.  See
-- 'GHC.Conc.setAllocationCounter' and
-- 'GHC.Conc.enableAllocationLimit'.
data AllocationLimitExceeded = AllocationLimitExceeded
    deriving Typeable

-- Uses the default 'toException'/'fromException' (via 'SomeException').
instance Exception AllocationLimitExceeded

instance Show AllocationLimitExceeded where
    showsPrec _ AllocationLimitExceeded =
        showString "allocation limit exceeded"

-- Pre-built exception value; the RTS throws this (by its closure) to a
-- thread whose allocation counter goes negative with the limit enabled.
allocationLimitExceeded :: SomeException -- for the RTS
allocationLimitExceeded = toException AllocationLimitExceeded
-----
-- |'assert' was applied to 'False'. -- |'assert' was applied to 'False'.
data AssertionFailed = AssertionFailed String data AssertionFailed = AssertionFailed String
deriving Typeable deriving Typeable
...@@ -174,7 +192,8 @@ data ArrayException ...@@ -174,7 +192,8 @@ data ArrayException
instance Exception ArrayException instance Exception ArrayException
stackOverflow, heapOverflow :: SomeException -- for the RTS -- for the RTS
stackOverflow, heapOverflow :: SomeException
stackOverflow = toException StackOverflow stackOverflow = toException StackOverflow
heapOverflow = toException HeapOverflow heapOverflow = toException HeapOverflow
......
...@@ -297,6 +297,10 @@ initCapability( Capability *cap, nat i ) ...@@ -297,6 +297,10 @@ initCapability( Capability *cap, nat i )
cap->r.rCCCS = NULL; cap->r.rCCCS = NULL;
#endif #endif
// cap->r.rCurrentTSO is charged for calls to allocate(), so we
// don't want it set when not running a Haskell thread.
cap->r.rCurrentTSO = NULL;
traceCapCreate(cap); traceCapCreate(cap);
traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i); traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i);
traceCapsetAssignCap(CAPSET_CLOCKDOMAIN_DEFAULT, i); traceCapsetAssignCap(CAPSET_CLOCKDOMAIN_DEFAULT, i);
......
...@@ -100,7 +100,9 @@ stg_gc_noregs ...@@ -100,7 +100,9 @@ stg_gc_noregs
CurrentNursery = bdescr_link(CurrentNursery); CurrentNursery = bdescr_link(CurrentNursery);
OPEN_NURSERY(); OPEN_NURSERY();
if (Capability_context_switch(MyCapability()) != 0 :: CInt || if (Capability_context_switch(MyCapability()) != 0 :: CInt ||
Capability_interrupt(MyCapability()) != 0 :: CInt) { Capability_interrupt(MyCapability()) != 0 :: CInt ||
(StgTSO_alloc_limit(CurrentTSO) `lt` (0::I64) &&
(TO_W_(StgTSO_flags(CurrentTSO)) & TSO_ALLOC_LIMIT) != 0)) {
ret = ThreadYielding; ret = ThreadYielding;
goto sched; goto sched;
} else { } else {
......
...@@ -1264,6 +1264,10 @@ typedef struct _RtsSymbolVal { ...@@ -1264,6 +1264,10 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(rtsSupportsBoundThreads) \ SymI_HasProto(rtsSupportsBoundThreads) \
SymI_HasProto(rts_isProfiled) \ SymI_HasProto(rts_isProfiled) \
SymI_HasProto(rts_isDynamic) \ SymI_HasProto(rts_isDynamic) \
SymI_HasProto(rts_getThreadAllocationCounter) \
SymI_HasProto(rts_setThreadAllocationCounter) \
SymI_HasProto(rts_enableThreadAllocationLimit) \
SymI_HasProto(rts_disableThreadAllocationLimit) \
SymI_HasProto(setProgArgv) \ SymI_HasProto(setProgArgv) \
SymI_HasProto(startupHaskell) \ SymI_HasProto(startupHaskell) \
SymI_HasProto(shutdownHaskell) \ SymI_HasProto(shutdownHaskell) \
......
...@@ -37,6 +37,7 @@ extern StgClosure ZCMain_main_closure; ...@@ -37,6 +37,7 @@ extern StgClosure ZCMain_main_closure;
PRELUDE_CLOSURE(base_GHCziIOziException_stackOverflow_closure); PRELUDE_CLOSURE(base_GHCziIOziException_stackOverflow_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_heapOverflow_closure); PRELUDE_CLOSURE(base_GHCziIOziException_heapOverflow_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_allocationLimitExceeded_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnThrowTo_closure); PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnThrowTo_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure); PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure); PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure);
...@@ -101,6 +102,7 @@ PRELUDE_INFO(base_GHCziStable_StablePtr_con_info); ...@@ -101,6 +102,7 @@ PRELUDE_INFO(base_GHCziStable_StablePtr_con_info);
#define stackOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_stackOverflow_closure) #define stackOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_stackOverflow_closure)
#define heapOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_heapOverflow_closure) #define heapOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_heapOverflow_closure)
#define allocationLimitExceeded_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_allocationLimitExceeded_closure)
#define blockedIndefinitelyOnMVar_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure) #define blockedIndefinitelyOnMVar_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure)
#define blockedIndefinitelyOnSTM_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure) #define blockedIndefinitelyOnSTM_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure)
#define nonTermination_closure DLL_IMPORT_DATA_REF(base_ControlziExceptionziBase_nonTermination_closure) #define nonTermination_closure DLL_IMPORT_DATA_REF(base_ControlziExceptionziBase_nonTermination_closure)
......
...@@ -88,6 +88,60 @@ suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here) ...@@ -88,6 +88,60 @@ suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here); throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
} }
/* -----------------------------------------------------------------------------
throwToSelf
Useful for throwing an async exception in a thread from the
runtime. It handles unlocking the throwto message returned by
throwTo().
Note [Throw to self when masked]
When a StackOverflow occurs when the thread is masked, we want to
defer the exception to when the thread becomes unmasked/hits an
interruptible point. We already have a mechanism for doing this,