Commit 8b75acd3 authored by Simon Marlow's avatar Simon Marlow

Make forkProcess work with +RTS -N

Consider this experimental for the time being.  There are a lot of
things that could go wrong, but I've verified that at least it works
on the test cases we have.

I also did some API cleanups while I was here.  Previously we had:

Capability * rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret);

but this API is particularly error-prone: if you forget to discard the
Capability * you passed in and use the return value instead, then
you're in for subtle bugs with +RTS -N later on.  So I changed all
these functions to this form:

void rts_eval (/* inout */ Capability **cap,
               /* in    */ HaskellObj p,
               /* out */   HaskellObj *ret)

It's much harder to use this version incorrectly, because you have to
pass the Capability in by reference.
parent 657773c8
......@@ -609,8 +609,8 @@ mkFExportCBits dflags c_nm maybe_target arg_htys res_hty is_IO_res_ty cc
, declareCResult
, text "cap = rts_lock();"
-- create the application + perform it.
, ptext (sLit "cap=rts_evalIO") <> parens (
cap <>
, ptext (sLit "rts_evalIO") <> parens (
char '&' <> cap <>
ptext (sLit "rts_apply") <> parens (
cap <>
text "(HaskellObj)"
......
......@@ -181,32 +181,44 @@ HsBool rts_getBool ( HaskellObj );
The versions ending in '_' allow you to specify an initial stack size.
Note that these calls may cause Garbage Collection, so all HaskellObj
references are rendered invalid by these calls.
All of these functions take a (Capability **) - the Capability
pointer is both an input and an output. We use an inout
parameter because this is less error-prone for the client than a
return value - the client could easily forget to use the return
value, whereas incorrectly using an inout parameter will usually
result in a type error.
------------------------------------------------------------------------- */
Capability *
rts_eval (Capability *, HaskellObj p, /*out*/HaskellObj *ret);
Capability *
rts_eval_ (Capability *, HaskellObj p, unsigned int stack_size,
/*out*/HaskellObj *ret);
void rts_eval (/* inout */ Capability **,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret);
void rts_eval_ (/* inout */ Capability **,
/* in */ HaskellObj p,
/* in */ unsigned int stack_size,
/* out */ HaskellObj *ret);
Capability *
rts_evalIO (Capability *, HaskellObj p, /*out*/HaskellObj *ret);
void rts_evalIO (/* inout */ Capability **,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret);
Capability *
rts_evalStableIO (Capability *, HsStablePtr s, /*out*/HsStablePtr *ret);
void rts_evalStableIO (/* inout */ Capability **,
/* in */ HsStablePtr s,
/* out */ HsStablePtr *ret);
Capability *
rts_evalLazyIO (Capability *, HaskellObj p, /*out*/HaskellObj *ret);
void rts_evalLazyIO (/* inout */ Capability **,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret);
Capability *
rts_evalLazyIO_ (Capability *, HaskellObj p, unsigned int stack_size,
/*out*/HaskellObj *ret);
void rts_evalLazyIO_ (/* inout */ Capability **,
/* in */ HaskellObj p,
/* in */ unsigned int stack_size,
/* out */ HaskellObj *ret);
void
rts_checkSchedStatus (char* site, Capability *);
void rts_checkSchedStatus (char* site, Capability *);
SchedulerStatus
rts_getSchedStatus (Capability *cap);
SchedulerStatus rts_getSchedStatus (Capability *cap);
/* --------------------------------------------------------------------------
Wrapper closures
......
......@@ -20,8 +20,9 @@
//
StgTSO *createThread (Capability *cap, nat stack_size);
Capability *scheduleWaitThread (StgTSO *tso, /*out*/HaskellObj* ret,
Capability *cap);
void scheduleWaitThread (/* in */ StgTSO *tso,
/* out */ HaskellObj* ret,
/* inout */ Capability **cap);
StgTSO *createGenThread (Capability *cap, nat stack_size,
StgClosure *closure);
......
......@@ -40,8 +40,12 @@ Capability *capabilities = NULL;
// locking, so we don't do that.
Capability *last_free_capability = NULL;
/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;
/*
* Indicates that the RTS wants to synchronise all the Capabilities
* for some reason. All Capabilities should stop and return to the
* scheduler.
*/
volatile StgWord pending_sync = 0;
/* Let foreign code get the current Capability -- assuming there is one!
* This is useful for unsafe foreign calls because they are called with
......@@ -422,13 +426,12 @@ releaseCapability_ (Capability* cap,
return;
}
if (waiting_for_gc == PENDING_GC_SEQ) {
if (pending_sync == SYNC_GC_SEQ || pending_sync == SYNC_FORK) {
last_free_capability = cap; // needed?
debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
debugTrace(DEBUG_sched, "sync pending, set capability %d free", cap->no);
return;
}
// If the next thread on the run queue is a bound thread,
// give this Capability to the appropriate Task.
if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
......@@ -536,7 +539,7 @@ releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
#endif
/* ----------------------------------------------------------------------------
* waitForReturnCapability( Task *task )
* waitForReturnCapability (Capability **pCap, Task *task)
*
* Purpose: when an OS thread returns from an external call,
* it calls waitForReturnCapability() (via Schedule.resumeThread())
......@@ -643,7 +646,7 @@ yieldCapability (Capability** pCap, Task *task)
{
Capability *cap = *pCap;
if (waiting_for_gc == PENDING_GC_PAR) {
if (pending_sync == SYNC_GC_PAR) {
traceEventGcStart(cap);
gcWorkerThread(cap);
traceEventGcEnd(cap);
......
......@@ -199,10 +199,15 @@ extern Capability *capabilities;
//
extern Capability *last_free_capability;
// GC indicator, in scope for the scheduler
#define PENDING_GC_SEQ 1
#define PENDING_GC_PAR 2
extern volatile StgWord waiting_for_gc;
//
// Indicates that the RTS wants to synchronise all the Capabilities
// for some reason. All Capabilities should stop and return to the
// scheduler.
//
#define SYNC_GC_SEQ 1
#define SYNC_GC_PAR 2
#define SYNC_FORK 3
extern volatile StgWord pending_sync;
// Acquires a capability at a return point. If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
......
......@@ -421,36 +421,39 @@ createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
Evaluating Haskell expressions
------------------------------------------------------------------------- */
Capability *
rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
void rts_eval (/* inout */ Capability **cap,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret)
{
StgTSO *tso;
tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
return scheduleWaitThread(tso,ret,cap);
tso = createGenThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
scheduleWaitThread(tso,ret,cap);
}
Capability *
rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
/*out*/HaskellObj *ret)
void rts_eval_ (/* inout */ Capability **cap,
/* in */ HaskellObj p,
/* in */ unsigned int stack_size,
/* out */ HaskellObj *ret)
{
StgTSO *tso;
tso = createGenThread(cap, stack_size, p);
return scheduleWaitThread(tso,ret,cap);
tso = createGenThread(*cap, stack_size, p);
scheduleWaitThread(tso,ret,cap);
}
/*
* rts_evalIO() evaluates a value of the form (IO a), forcing the action's
* result to WHNF before returning.
*/
Capability *
rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
void rts_evalIO (/* inout */ Capability **cap,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret)
{
StgTSO* tso;
tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
return scheduleWaitThread(tso,ret,cap);
tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
scheduleWaitThread(tso,ret,cap);
}
/*
......@@ -459,49 +462,50 @@ rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
* action's result to WHNF before returning. The result is returned
* in a StablePtr.
*/
Capability *
rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
void rts_evalStableIO (/* inout */ Capability **cap,
/* in */ HsStablePtr s,
/* out */ HsStablePtr *ret)
{
StgTSO* tso;
StgClosure *p, *r;
SchedulerStatus stat;
p = (StgClosure *)deRefStablePtr(s);
tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
// async exceptions are always blocked by default in the created
// thread. See #1048.
tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
cap = scheduleWaitThread(tso,&r,cap);
stat = rts_getSchedStatus(cap);
scheduleWaitThread(tso,&r,cap);
stat = rts_getSchedStatus(*cap);
if (stat == Success && ret != NULL) {
ASSERT(r != NULL);
*ret = getStablePtr((StgPtr)r);
}
return cap;
}
/*
* Like rts_evalIO(), but doesn't force the action's result.
*/
Capability *
rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
void rts_evalLazyIO (/* inout */ Capability **cap,
/* in */ HaskellObj p,
/* out */ HaskellObj *ret)
{
StgTSO *tso;
tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
return scheduleWaitThread(tso,ret,cap);
tso = createIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
scheduleWaitThread(tso,ret,cap);
}
Capability *
rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
/*out*/HaskellObj *ret)
void rts_evalLazyIO_ (/* inout */ Capability **cap,
/* in */ HaskellObj p,
/* in */ unsigned int stack_size,
/* out */ HaskellObj *ret)
{
StgTSO *tso;
tso = createIOThread(cap, stack_size, p);
return scheduleWaitThread(tso,ret,cap);
tso = createIOThread(*cap, stack_size, p);
scheduleWaitThread(tso,ret,cap);
}
/* Convenience function for decoding the returned status. */
......
......@@ -60,7 +60,7 @@ static void real_main(void)
/* ToDo: want to start with a larger stack size */
{
Capability *cap = rts_lock();
cap = rts_evalLazyIO(cap,progmain_closure, NULL);
rts_evalLazyIO(&cap,progmain_closure, NULL);
status = rts_getSchedStatus(cap);
taskTimeStamp(myTask());
rts_unlock(cap);
......
......@@ -431,7 +431,7 @@ static void flushStdHandles(void)
{
Capability *cap;
cap = rts_lock();
cap = rts_evalIO(cap, flushStdHandles_closure, NULL);
rts_evalIO(&cap, flushStdHandles_closure, NULL);
rts_unlock(cap);
}
......
This diff is collapsed.
......@@ -77,7 +77,7 @@ static snEntry *stable_ptr_free = NULL;
static unsigned int SPT_size = 0;
#ifdef THREADED_RTS
static Mutex stable_mutex;
Mutex stable_mutex;
#endif
static void enlargeStablePtrTable(void);
......
......@@ -33,6 +33,11 @@ void updateStablePtrTable ( rtsBool full );
void stablePtrPreGC ( void );
void stablePtrPostGC ( void );
#ifdef THREADED_RTS
// needed by Schedule.c:forkProcess()
extern Mutex stable_mutex;
#endif
#include "EndPrivate.h"
#endif /* STABLE_H */
......@@ -197,7 +197,7 @@ forkOS_createThreadWrapper ( void * entry )
{
Capability *cap;
cap = rts_lock();
cap = rts_evalStableIO(cap, (HsStablePtr) entry, NULL);
rts_evalStableIO(&cap, (HsStablePtr) entry, NULL);
taskTimeStamp(myTask());
rts_unlock(cap);
return NULL;
......
......@@ -145,11 +145,10 @@ ioManagerDie (void)
}
}
Capability *
ioManagerStartCap (Capability *cap)
void
ioManagerStartCap (Capability **cap)
{
return rts_evalIO(
cap,&base_GHCziConcziIO_ensureIOManagerIsRunning_closure,NULL);
rts_evalIO(cap,&base_GHCziConcziIO_ensureIOManagerIsRunning_closure,NULL);
}
void
......@@ -159,7 +158,7 @@ ioManagerStart (void)
Capability *cap;
if (io_manager_control_fd < 0 || io_manager_wakeup_fd < 0) {
cap = rts_lock();
cap = ioManagerStartCap(cap);
ioManagerStartCap(&cap);
rts_unlock(cap);
}
}
......
......@@ -24,7 +24,7 @@ extern siginfo_t *next_pending_handler;
void startSignalHandlers(Capability *cap);
#endif
Capability *ioManagerStartCap (Capability *cap);
void ioManagerStartCap (/* inout */ Capability **cap);
extern StgInt *signal_handlers;
......
......@@ -259,7 +259,7 @@ GarbageCollect (rtsBool force_major_gc,
* We don't try to parallelise minor GCs (unless the user asks for
* it with +RTS -gn0), or mark/compact/sweep GC.
*/
if (gc_type == PENDING_GC_PAR) {
if (gc_type == SYNC_GC_PAR) {
n_gc_threads = RtsFlags.ParFlags.nNodes;
} else {
n_gc_threads = 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment