Commit 7fb4a5bc authored by Simon Peyton Jones

Merge branch 'master' of http://darcs.haskell.org/ghc

parents 3a266f13 3fc546fe
@@ -506,9 +506,10 @@ setupUpdate closure_info code
         else do
                 tickyPushUpdateFrame
                 dflags <- getDynFlags
-                if not opt_SccProfilingOn && dopt Opt_EagerBlackHoling dflags
-                    then pushBHUpdateFrame (CmmReg nodeReg) code
-                    else pushUpdateFrame   (CmmReg nodeReg) code
+                if blackHoleOnEntry closure_info &&
+                   not opt_SccProfilingOn && dopt Opt_EagerBlackHoling dflags
+                    then pushBHUpdateFrame (CmmReg nodeReg) code
+                    else pushUpdateFrame   (CmmReg nodeReg) code
 
   | otherwise  -- A static closure
   = do  { tickyUpdateBhCaf closure_info

@@ -731,7 +731,7 @@ blackHoleOnEntry cl_info
   = case closureLFInfo cl_info of
         LFReEntrant _ _ _ _       -> False
         LFLetNoEscape _           -> False
-        LFThunk _ no_fvs _updatable _ _ -> not no_fvs -- to plug space-leaks.
+        LFThunk _ _no_fvs _updatable _ _ -> True
         _other -> panic "blackHoleOnEntry"   -- Should never happen
 
 isKnownFun :: LambdaFormInfo -> Bool

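The two blackHoleOnEntry hunks (this one and its new-codegen twin below) make every LFThunk eligible for blackholing on entry; whether it actually happens is now decided at the pushUpdateFrame call sites, which also require Opt_EagerBlackHoling and no cost-centre profiling. For orientation, a hedged C sketch of what eager blackholing amounts to at runtime; the helper is hypothetical, since GHC really emits this as Cmm at thunk entry:

#include "Rts.h"

/* Claim a thunk on entry by overwriting its header with a blackhole,
 * so that another thread entering the same thunk blocks instead of
 * duplicating the evaluation. */
static void eagerlyBlackhole(StgClosure *thunk, StgTSO *owner)
{
    ((StgInd *)thunk)->indirectee = (StgClosure *)owner; // record the owner
    write_barrier();  // owner must be visible before the header flips
    SET_INFO(thunk, &stg_EAGER_BLACKHOLE_info);
}
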
@@ -565,12 +565,15 @@ setupUpdate closure_info node body
       then do tickyUpdateFrameOmitted; body
       else do
           tickyPushUpdateFrame
-          --dflags <- getDynFlags
-          let es = [CmmReg (CmmLocal node), mkLblExpr mkUpdInfoLabel]
-          --if not opt_SccProfilingOn && dopt Opt_EagerBlackHoling dflags
-          --  then pushUpdateFrame es body -- XXX black hole
-          --  else pushUpdateFrame es body
-          pushUpdateFrame es body
+          dflags <- getDynFlags
+          let
+              bh = blackHoleOnEntry closure_info &&
+                   not opt_SccProfilingOn && dopt Opt_EagerBlackHoling dflags
+              lbl | bh        = mkBHUpdInfoLabel
+                  | otherwise = mkUpdInfoLabel
+          pushUpdateFrame [CmmReg (CmmLocal node), mkLblExpr lbl] body
 
   | otherwise  -- A static closure
   = do  { tickyUpdateBhCaf closure_info

@@ -579,7 +582,7 @@ setupUpdate closure_info node body
       then do -- Blackhole the (updatable) CAF:
                 { upd_closure <- link_caf True
                 ; pushUpdateFrame [CmmReg (CmmLocal upd_closure),
-                                   mkLblExpr mkUpdInfoLabel] body } -- XXX black hole
+                                   mkLblExpr mkBHUpdInfoLabel] body }
       else do {tickyUpdateFrameOmitted; body}
     }

@@ -728,7 +728,7 @@ blackHoleOnEntry cl_info
   = case closureLFInfo cl_info of
         LFReEntrant _ _ _ _       -> False
         LFLetNoEscape             -> False
-        LFThunk _ no_fvs _updatable _ _ -> not no_fvs -- to plug space-leaks.
+        LFThunk _ _no_fvs _updatable _ _ -> True
         _other -> panic "blackHoleOnEntry"   -- Should never happen
 
 isStaticClosure :: ClosureInfo -> Bool

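Both setupUpdate hunks swap the return address stored in the update frame: when the updatee is blackholed eagerly, the frame is pushed with the BH variant of the update code (mkBHUpdInfoLabel, i.e. stg_bh_upd_frame_info rather than stg_upd_frame_info). A hedged sketch of the frame the generated code pushes; the field layout follows the RTS's StgUpdateFrame, but the typedef name here is illustrative:

/* An update frame on the STG stack: a return address plus the closure
 * that will be overwritten with an indirection to the result. */
typedef struct {
    StgHeader   header;   /* return address: stg_upd_frame_info, or
                             stg_bh_upd_frame_info when the updatee
                             was eagerly blackholed */
    StgClosure *updatee;  /* the thunk to update */
} UpdateFrameSketch;
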
@@ -1029,9 +1029,9 @@ ifeq "$(mingw32_TARGET_OS)" "1"
 endif
 
 ifeq "$(mingw32_TARGET_OS)" "1"
-DOCDIR_TO_PUBLISH = bindisttest/"install dir"/doc
+DOCDIR_TO_PUBLISH = $(BIN_DIST_INST_DIR)/doc
 else
-DOCDIR_TO_PUBLISH = bindisttest/"install dir"/share/doc/ghc
+DOCDIR_TO_PUBLISH = $(BIN_DIST_INST_DIR)/share/doc/ghc
 endif
 
 .PHONY: publish-docs

@@ -679,18 +679,22 @@ waitForReturnCapability (Capability **pCap, Task *task)
  * yieldCapability
  * ------------------------------------------------------------------------- */
 
-void
-yieldCapability (Capability** pCap, Task *task)
+/* See Note [GC livelock] in Schedule.c for why we have gcAllowed
+   and return the rtsBool */
+rtsBool /* Did we GC? */
+yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
 {
     Capability *cap = *pCap;
 
-    if (pending_sync == SYNC_GC_PAR) {
+    if ((pending_sync == SYNC_GC_PAR) && gcAllowed) {
         traceEventGcStart(cap);
         gcWorkerThread(cap);
         traceEventGcEnd(cap);
         traceSparkCounters(cap);
         // See Note [migrated bound threads 2]
-        if (task->cap == cap) return;
+        if (task->cap == cap) {
+            return rtsTrue;
+        }
     }
 
     debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

@@ -756,7 +760,7 @@ yieldCapability (Capability** pCap, Task *task)
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    return;
+    return rtsFalse;
 }
 
 // Note [migrated bound threads]

@@ -257,7 +257,7 @@ EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
 // On return: *pCap is NULL if the capability was released.  The
 // current task should then re-acquire it using waitForCapability().
 //
-void yieldCapability (Capability** pCap, Task *task);
+rtsBool yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed);
 
 // Acquires a capability for doing some work.
 //

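Read together with the prototype: the result reports whether this call ran a GC, and gcAllowed lets the caller veto joining a parallel GC. A minimal hedged usage sketch (the wrapper is hypothetical; the real call sites are the Schedule.c hunks below):

#if defined(THREADED_RTS)
/* Yield once, refusing to join a GC if the previous yield already did
 * one -- see Note [GC livelock] in Schedule.c. */
static rtsBool yieldOnce(Capability **pCap, Task *task, rtsBool didGcLast)
{
    return yieldCapability(pCap, task, /* gcAllowed = */ !didGcLast);
}
#endif
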
@@ -156,8 +156,7 @@ throwTo (Capability *cap,       // the Capability we hold
     MessageThrowTo *msg;
 
     msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
-    // message starts locked; the caller has to unlock it when it is
-    // ready.
+    // the message starts locked; see below
     SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
     msg->source = source;
     msg->target = target;

@@ -166,9 +165,16 @@ throwTo (Capability *cap,       // the Capability we hold
     switch (throwToMsg(cap, msg))
     {
     case THROWTO_SUCCESS:
+        // unlock the message now, otherwise we leave a WHITEHOLE in
+        // the heap (#6103)
+        SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
         return NULL;
+
     case THROWTO_BLOCKED:
     default:
+        // the caller will unlock the message when it is ready.  We
+        // cannot unlock it yet, because the calling thread will need
+        // to tidy up its state first.
         return msg;
     }
 }

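The message is allocated as a WHITEHOLE (locked); after this fix the success path unlocks it inside throwTo itself, so only the blocked path hands a locked message back to the caller. A hedged sketch of a caller on that path, using names from the surrounding RTS code (the local variables are illustrative):

MessageThrowTo *m = throwTo(cap, source_tso, target_tso, exception);
if (m != NULL) {
    // the target could not take the exception yet: record that the
    // source thread is blocked on the message, then unlock it by
    // restoring the real info pointer (undoing the WHITEHOLE).
    source_tso->why_blocked = BlockedOnMsgThrowTo;
    source_tso->block_info.throwto = m;
    unlockClosure((StgClosure *)m, &stg_MSG_THROWTO_info);
}
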
@@ -638,15 +638,24 @@ scheduleFindWork (Capability **pcap)
 #if defined(THREADED_RTS)
 STATIC_INLINE rtsBool
-shouldYieldCapability (Capability *cap, Task *task)
+shouldYieldCapability (Capability *cap, Task *task, rtsBool didGcLast)
 {
     // we need to yield this capability to someone else if..
-    //   - another thread is initiating a GC
+    //   - another thread is initiating a GC, and we didn't just do a GC
+    //     (see Note [GC livelock])
     //   - another Task is returning from a foreign call
     //   - the thread at the head of the run queue cannot be run
     //     by this Task (it is bound to another Task, or it is unbound
     //     and this task is bound).
+    //
+    // Note [GC livelock]
+    //
+    // If we are interrupted to do a GC, then we do not immediately do
+    // another one.  This avoids a starvation situation where one
+    // Capability keeps forcing a GC and the other Capabilities make no
+    // progress at all.
-    return (pending_sync ||
+    return ((pending_sync && !didGcLast) ||
             cap->returning_tasks_hd != NULL ||
             (!emptyRunQueue(cap) && (task->incall->tso == NULL
                                        ? cap->run_queue_hd->bound != NULL

@@ -667,20 +676,22 @@ static void
 scheduleYield (Capability **pcap, Task *task)
 {
     Capability *cap = *pcap;
+    int didGcLast = rtsFalse;
 
     // if we have work, and we don't need to give up the Capability, continue.
     //
-    if (!shouldYieldCapability(cap,task) &&
+    if (!shouldYieldCapability(cap,task,rtsFalse) &&
         (!emptyRunQueue(cap) ||
          !emptyInbox(cap) ||
-         sched_state >= SCHED_INTERRUPTING))
+         sched_state >= SCHED_INTERRUPTING)) {
         return;
+    }
 
     // otherwise yield (sleep), and keep yielding if necessary.
     do {
-        yieldCapability(&cap,task);
+        didGcLast = yieldCapability(&cap,task, !didGcLast);
     }
-    while (shouldYieldCapability(cap,task));
+    while (shouldYieldCapability(cap,task,didGcLast));
 
     // note there may still be no threads on the run queue at this
     // point, the caller has to check.

@@ -1374,7 +1385,7 @@ static nat requestSync (Capability **pcap, Task *task, nat sync_type)
         debugTrace(DEBUG_sched, "someone else is trying to sync (%d)...",
                    prev_pending_sync);
         ASSERT(*pcap);
-        yieldCapability(pcap,task);
+        yieldCapability(pcap,task,rtsTrue);
     } while (pending_sync);
     return prev_pending_sync; // NOTE: task->cap might have changed now
 }

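Taken together: yieldCapability reports whether it ran a GC, and scheduleYield feeds that answer back both as the gcAllowed veto (!didGcLast) and into the pending_sync test in shouldYieldCapability. requestSync always passes rtsTrue, since a task that is itself waiting to start a sync must keep helping whichever sync won the race. A hedged comment-trace of the two-Capability scenario from Note [GC livelock]:

/* Without didGcLast:
 *   cap A: finishes a GC, immediately requests the next sync, GCs, ...
 *   cap B: sees pending_sync, yields and does GC work, wakes up, sees
 *          pending_sync set again, yields again -- and never runs a
 *          thread from its own run queue.
 * With didGcLast:
 *   cap B: yields and GCs once, yieldCapability returns rtsTrue, so
 *          (pending_sync && !didGcLast) is false for one round;
 *          scheduleYield exits and cap B runs at least one thread
 *          before the next GC can capture it. */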