Commit 16214216 authored by simonmar

[project @ 2005-04-05 12:19:54 by simonmar]

Some multi-processor hackery, including

  - Don't hang blocked threads off BLACKHOLEs any more, instead keep
    them all on a separate queue which is checked periodically for
    threads to wake up.

    This is good because (a) we don't have to worry about locking the
    closure in SMP mode when we want to block on it, and (b) it means
    the standard update code doesn't need to wake up any threads or
    check for a BLACKHOLE_BQ, simplifying the update code.

    The downside is that if there are lots of threads blocked on
    BLACKHOLEs, we might have to do a lot of repeated list traversal.
    We don't expect this to be common, though.  conc023 goes slower
    with this change, but we expect most programs to benefit from the
    shorter update code.

  - Fixing up the Capability code to handle multiple capabilities (SMP
    mode), and related changes to get the SMP mode at least building.
parent 14a5aadb
......@@ -59,7 +59,6 @@
#define STOP_FRAME 44
#define CAF_BLACKHOLE 45
#define BLACKHOLE 46
#define BLACKHOLE_BQ 47
#define SE_BLACKHOLE 48
#define SE_CAF_BLACKHOLE 49
#define MVAR 50
......
......@@ -403,13 +403,6 @@ typedef struct StgRBH_ {
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
} StgRBH;
#else
typedef struct StgBlockingQueue_ {
StgHeader header;
struct StgTSO_ *blocking_queue;
} StgBlockingQueue;
#endif
#if defined(PAR)
......
......@@ -26,9 +26,6 @@ extern StgTSO *createThread(nat stack_size, StgInt pri);
#else
extern StgTSO *createThread(nat stack_size);
#endif
#if defined(PAR) || defined(SMP)
extern void taskStart(void);
#endif
extern void scheduleThread(StgTSO *tso);
extern SchedulerStatus scheduleWaitThread(StgTSO *tso, /*out*/HaskellObj* ret,
Capability *initialCapability);
......
......@@ -97,7 +97,6 @@ RTS_INFO(stg_CAF_UNENTERED_info);
RTS_INFO(stg_CAF_ENTERED_info);
RTS_INFO(stg_BLACKHOLE_info);
RTS_INFO(stg_CAF_BLACKHOLE_info);
RTS_INFO(stg_BLACKHOLE_BQ_info);
#ifdef TICKY_TICKY
RTS_INFO(stg_SE_BLACKHOLE_info);
RTS_INFO(stg_SE_CAF_BLACKHOLE_info);
......@@ -158,7 +157,6 @@ RTS_ENTRY(stg_CAF_UNENTERED_entry);
RTS_ENTRY(stg_CAF_ENTERED_entry);
RTS_ENTRY(stg_BLACKHOLE_entry);
RTS_ENTRY(stg_CAF_BLACKHOLE_entry);
RTS_ENTRY(stg_BLACKHOLE_BQ_entry);
#ifdef TICKY_TICKY
RTS_ENTRY(stg_SE_BLACKHOLE_entry);
RTS_ENTRY(stg_SE_CAF_BLACKHOLE_entry);
......
......@@ -187,18 +187,6 @@ extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
-------------------------------------------------------------------------- */
/*
* Storage manager mutex
*/
#if defined(SMP)
extern Mutex sm_mutex;
#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
#else
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
#endif
/* ToDo: shouldn't recordMutable acquire some
* kind of lock in the SMP case? Or do we need per-processor
* mutable lists?
......@@ -277,7 +265,7 @@ INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); }
{ return sizeofW(StgHeader)+MIN_UPD_SIZE; }
/* --------------------------------------------------------------------------
Sizes of closures
......
......@@ -64,8 +64,7 @@
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
AWAKEN_BQ(info,updclosure); \
updateWithIndirection(GET_INFO(updclosure), ind_info, \
updateWithIndirection(ind_info, \
updclosure, \
heapptr, \
and_then); \
......@@ -74,11 +73,7 @@
#if defined(PROFILING) || defined(TICKY_TICKY)
#define UPD_PERM_IND(updclosure, heapptr) \
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
AWAKEN_BQ(info,updclosure); \
updateWithPermIndirection(info, \
updclosure, \
updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
#endif
......@@ -88,20 +83,13 @@
# ifdef TICKY_TICKY
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
AWAKEN_BQ_NOLOCK(info,updclosure); \
updateWithPermIndirection(info, \
updclosure, \
updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
# else
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
AWAKEN_BQ_NOLOCK(info,updclosure); \
updateWithIndirection(info, INFO_PTR(stg_IND_info), \
updateWithIndirection(INFO_PTR(stg_IND_info), \
updclosure, \
heapptr,); \
BLOCK_END
......@@ -167,31 +155,6 @@ extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
}
#else /* !GRAN && !PAR */
#define DO_AWAKEN_BQ(closure) \
FCALL awakenBlockedQueue(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
#define AWAKEN_BQ(info,closure) \
if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
DO_AWAKEN_BQ(closure); \
}
#define AWAKEN_STATIC_BQ(info,closure) \
if (info == INFO_PTR(stg_BLACKHOLE_BQ_STATIC_info)) { \
DO_AWAKEN_BQ(closure); \
}
#ifdef RTS_SUPPORTS_THREADS
#define DO_AWAKEN_BQ_NOLOCK(closure) \
FCALL awakenBlockedQueueNoLock(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
#define AWAKEN_BQ_NOLOCK(info,closure) \
if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
DO_AWAKEN_BQ_NOLOCK(closure); \
}
#endif
#endif /* GRAN || PAR */
/* -----------------------------------------------------------------------------
......@@ -279,7 +242,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
*/
#ifdef CMINUSMINUS
#define generation(n) (W_[generations] + n*SIZEOF_generation)
#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
#define updateWithIndirection(ind_info, p1, p2, and_then) \
W_ bd; \
\
/* ASSERT( p1 != p2 && !closure_IND(p1) ); \
......@@ -292,11 +255,9 @@ DEBUG_FILL_SLOP(StgClosure *p)
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
if (info != stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
foreign "C" recordMutableGen(p1 "ptr", \
DEBUG_FILL_SLOP(p1); \
foreign "C" recordMutableGen(p1 "ptr", \
generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
} \
StgInd_indirectee(p1) = p2; \
SET_INFO(p1, stg_IND_OLDGEN_info); \
LDV_RECORD_CREATE(p1); \
......@@ -304,7 +265,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
and_then; \
}
#else
#define updateWithIndirection(_info, ind_info, p1, p2, and_then) \
#define updateWithIndirection(ind_info, p1, p2, and_then) \
{ \
bdescr *bd; \
\
......@@ -318,10 +279,8 @@ DEBUG_FILL_SLOP(StgClosure *p)
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
if (_info != &stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
recordMutableGen(p1, &generations[bd->gen_no]); \
} \
DEBUG_FILL_SLOP(p1); \
recordMutableGen(p1, &generations[bd->gen_no]); \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1, &stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
......@@ -335,8 +294,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
*/
#ifndef CMINUSMINUS
INLINE_HEADER void
updateWithPermIndirection(const StgInfoTable *info,
StgClosure *p1,
updateWithPermIndirection(StgClosure *p1,
StgClosure *p2)
{
bdescr *bd;
......@@ -361,9 +319,7 @@ updateWithPermIndirection(const StgInfoTable *info,
LDV_RECORD_CREATE(p1);
TICK_UPD_NEW_PERM_IND(p1);
} else {
if (info != &stg_BLACKHOLE_BQ_info) {
recordMutableGen(p1, &generations[bd->gen_no]);
}
recordMutableGen(p1, &generations[bd->gen_no]);
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
/*
......
......@@ -273,9 +273,6 @@ main(int argc, char *argv[])
opt_struct_size(StgTSOGranInfo,GRAN);
opt_struct_size(StgTSODistInfo,DIST);
closure_size(StgBlockingQueue);
closure_field(StgBlockingQueue, blocking_queue);
closure_field(StgUpdateFrame, updatee);
closure_field(StgCatchFrame, handler);
......
......@@ -28,10 +28,10 @@
Capability MainCapability; /* for non-SMP, we have one global capability */
#endif
#if defined(RTS_SUPPORTS_THREADS)
nat rts_n_free_capabilities;
#if defined(RTS_SUPPORTS_THREADS)
/* returning_worker_cond: when a worker thread returns from executing an
* external call, it needs to wait for an RTS Capability before passing
* on the result of the call to the Haskell thread that made it.
......@@ -76,6 +76,13 @@ static Condition *passTarget = NULL;
static rtsBool passingCapability = rtsFalse;
#endif
#if defined(SMP)
/*
* Free capability list.
*/
Capability *free_capabilities;
#endif
#ifdef SMP
#define UNUSED_IF_NOT_SMP
#else
......@@ -83,9 +90,9 @@ static rtsBool passingCapability = rtsFalse;
#endif
#if defined(RTS_USER_SIGNALS)
#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || signals_pending())
#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking || signals_pending())
#else
#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted)
#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking)
#endif
/* ----------------------------------------------------------------------------
......@@ -99,9 +106,34 @@ initCapability( Capability *cap )
cap->f.stgGCFun = (F_)__stg_gc_fun;
}
/* -----------------------------------------------------------------------------
* Function: initCapabilities_(nat)
*
* Purpose: upon startup, allocate and fill in table
* holding 'n' Capabilities. Only for SMP, since
* it is the only build that supports multiple
* capabilities within the RTS.
* -------------------------------------------------------------------------- */
#if defined(SMP)
static void initCapabilities_(nat n);
#endif
static void
initCapabilities_(nat n)
{
nat i;
Capability *cap, *prev;
cap = NULL;
prev = NULL;
for (i = 0; i < n; i++) {
cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
initCapability(cap);
cap->link = prev;
prev = cap;
}
free_capabilities = cap;
rts_n_free_capabilities = n;
IF_DEBUG(scheduler,
sched_belch("allocated %d capabilities", rts_n_free_capabilities));
}
#endif /* SMP */
/* ---------------------------------------------------------------------------
* Function: initCapabilities()
......@@ -123,19 +155,11 @@ initCapabilities( void )
#if defined(RTS_SUPPORTS_THREADS)
initCondition(&returning_worker_cond);
initCondition(&thread_ready_cond);
rts_n_free_capabilities = 1;
#endif
return;
rts_n_free_capabilities = 1;
}
#if defined(SMP)
/* Free capability list. */
static Capability *free_capabilities; /* Available capabilities for running threads */
static Capability *returning_capabilities;
/* Capabilities being passed to returning worker threads */
#endif
/* ----------------------------------------------------------------------------
grabCapability( Capability** )
......@@ -149,17 +173,18 @@ static
void
grabCapability( Capability** cap )
{
#if !defined(SMP)
#if defined(RTS_SUPPORTS_THREADS)
#if defined(SMP)
ASSERT(rts_n_free_capabilities > 0);
*cap = free_capabilities;
free_capabilities = (*cap)->link;
rts_n_free_capabilities--;
#else
# if defined(RTS_SUPPORTS_THREADS)
ASSERT(rts_n_free_capabilities == 1);
rts_n_free_capabilities = 0;
#endif
# endif
*cap = &MainCapability;
handleSignalsInThisThread();
#else
*cap = free_capabilities;
free_capabilities = (*cap)->link;
rts_n_free_capabilities--;
#endif
#if defined(RTS_SUPPORTS_THREADS)
IF_DEBUG(scheduler, sched_belch("worker: got capability"));
......@@ -179,7 +204,7 @@ releaseCapability( Capability* cap UNUSED_IF_NOT_SMP )
{
// Precondition: sched_mutex is held.
#if defined(RTS_SUPPORTS_THREADS)
#ifndef SMP
#if !defined(SMP)
ASSERT(rts_n_free_capabilities == 0);
#endif
// Check to see whether a worker thread can be given
......@@ -191,8 +216,8 @@ releaseCapability( Capability* cap UNUSED_IF_NOT_SMP )
#if defined(SMP)
// SMP variant untested
cap->link = returning_capabilities;
returning_capabilities = cap;
cap->link = free_capabilities;
free_capabilities = cap;
#endif
rts_n_waiting_workers--;
......@@ -272,13 +297,14 @@ waitForReturnCapability( Mutex* pMutex, Capability** pCap )
context_switch = 1; // make sure it's our turn soon
waitCondition(&returning_worker_cond, pMutex);
#if defined(SMP)
*pCap = returning_capabilities;
returning_capabilities = (*pCap)->link;
*pCap = free_capabilities;
free_capabilities = (*pCap)->link;
ASSERT(pCap != NULL);
#else
*pCap = &MainCapability;
ASSERT(rts_n_free_capabilities == 0);
handleSignalsInThisThread();
#endif
handleSignalsInThisThread();
} else {
grabCapability(pCap);
}
......@@ -313,7 +339,7 @@ yieldCapability( Capability** pCap )
*pCap = NULL;
}
// Post-condition: pMutex is assumed held, and either:
// Post-condition: either:
//
// 1. *pCap is NULL, in which case the current thread does not
// hold a capability now, or
......@@ -418,36 +444,3 @@ threadRunnable ( void )
startSchedulerTaskIfNecessary();
#endif
}
/* ------------------------------------------------------------------------- */
#if defined(SMP)
/*
* Function: initCapabilities_(nat)
*
* Purpose: upon startup, allocate and fill in table
* holding 'n' Capabilities. Only for SMP, since
* it is the only build that supports multiple
* capabilities within the RTS.
*/
static void
initCapabilities_(nat n)
{
nat i;
Capability *cap, *prev;
cap = NULL;
prev = NULL;
for (i = 0; i < n; i++) {
cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
initCapability(cap);
cap->link = prev;
prev = cap;
}
free_capabilities = cap;
rts_n_free_capabilities = n;
returning_capabilities = NULL;
IF_DEBUG(scheduler,
sched_belch("allocated %d capabilities", n_free_capabilities));
}
#endif /* SMP */
......@@ -80,6 +80,9 @@ extern void passCapability(Condition *pTargetThreadCond);
extern void passCapabilityToWorker( void );
extern nat rts_n_free_capabilities;
extern Capability *free_capabilities;
/* number of worker threads waiting for a return capability
*/
extern nat rts_n_waiting_workers;
......@@ -101,7 +104,11 @@ static inline rtsBool noCapabilities (void)
static inline rtsBool allFreeCapabilities (void)
{
#if defined(SMP)
return (rts_n_free_capabilities == RTS_DEREF(RtsFlags).ParFlags.nNodes);
#else
return (rts_n_free_capabilities == 1);
#endif
}
#else // !RTS_SUPPORTS_THREADS
......
......@@ -11,6 +11,7 @@
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Apply.h"
#include "OSThreads.h"
#include "Storage.h"
#include "LdvProfile.h"
#include "Updates.h"
......@@ -1624,7 +1625,9 @@ evacuate_large(StgPtr p)
REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
#if defined(PAR)
StgClosure *to;
#endif
bdescr *bd = NULL;
step *stp;
const StgInfoTable *info;
......@@ -1755,10 +1758,6 @@ loop:
case BLACKHOLE:
return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
case BLACKHOLE_BQ:
to = copy(q,BLACKHOLE_sizeW(),stp);
return to;
case THUNK_SELECTOR:
{
StgClosure *p;
......@@ -1919,7 +1918,7 @@ loop:
}
#if defined(PAR)
case RBH: // cf. BLACKHOLE_BQ
case RBH:
{
//StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
to = copy(q,BLACKHOLE_sizeW(),stp);
......@@ -2167,7 +2166,6 @@ selector_loop:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
case BLACKHOLE_BQ:
#if defined(PAR)
case RBH:
case BLOCKED_FETCH:
......@@ -2614,16 +2612,6 @@ scavenge(step *stp)
p += BLACKHOLE_sizeW();
break;
case BLACKHOLE_BQ:
{
StgBlockingQueue *bh = (StgBlockingQueue *)p;
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
failed_to_evac = rtsTrue;
p += BLACKHOLE_sizeW();
break;
}
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
......@@ -2697,7 +2685,7 @@ scavenge(step *stp)
}
#if defined(PAR)
case RBH: // cf. BLACKHOLE_BQ
case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
......@@ -2740,7 +2728,7 @@ scavenge(step *stp)
p += sizeofW(StgFetchMe);
break; // nothing to do in this case
case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
......@@ -2969,15 +2957,6 @@ linear_scan:
case ARR_WORDS:
break;
case BLACKHOLE_BQ:
{
StgBlockingQueue *bh = (StgBlockingQueue *)p;
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
failed_to_evac = rtsTrue;
break;
}
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
......@@ -3039,7 +3018,7 @@ linear_scan:
}
#if defined(PAR)
case RBH: // cf. BLACKHOLE_BQ
case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
......@@ -3078,7 +3057,7 @@ linear_scan:
case FETCH_ME:
break; // nothing to do in this case
case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
......@@ -3271,16 +3250,6 @@ scavenge_one(StgPtr p)
case BLACKHOLE:
break;
case BLACKHOLE_BQ:
{
StgBlockingQueue *bh = (StgBlockingQueue *)p;
evac_gen = 0; // repeatedly mutable
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
failed_to_evac = rtsTrue;
break;
}
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
......@@ -3347,7 +3316,7 @@ scavenge_one(StgPtr p)
}
#if defined(PAR)
case RBH: // cf. BLACKHOLE_BQ
case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
......@@ -3387,7 +3356,7 @@ scavenge_one(StgPtr p)
case FETCH_ME:
break; // nothing to do in this case
case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
......@@ -3941,7 +3910,7 @@ threadLazyBlackHole(StgTSO *tso)
{
StgClosure *frame;
StgRetInfoTable *info;
StgBlockingQueue *bh;
StgClosure *bh;
StgPtr stack_end;
stack_end = &tso->stack[tso->stack_size];
......@@ -3954,7 +3923,7 @@ threadLazyBlackHole(StgTSO *tso)
switch (info->i.type) {
case UPDATE_FRAME:
bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee;
bh = ((StgUpdateFrame *)frame)->updatee;
/* if the thunk is already blackholed, it means we've also
* already blackholed the rest of the thunks on this stack,
......@@ -3967,8 +3936,7 @@ threadLazyBlackHole(StgTSO *tso)
return;
}
if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
......@@ -4072,7 +4040,6 @@ threadSqueezeStack(StgTSO *tso)
* screw us up if we don't check.
*/
if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
// this wakes the threads up
UPD_IND_NOLOCK(upd->updatee, updatee);
}
......@@ -4090,11 +4057,10 @@ threadSqueezeStack(StgTSO *tso)
// single update frame, or the topmost update frame in a series
else {
StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee;
StgClosure *bh = upd->updatee;
// Do lazy black-holing
if (bh->header.info != &stg_BLACKHOLE_info &&
bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
......
......@@ -10,6 +10,7 @@
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "OSThreads.h"
#include "Storage.h"
#include "BlockAlloc.h"
#include "MBlock.h"
......@@ -548,7 +549,6 @@ thread_obj (StgInfoTable *info, StgPtr p)
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
case BLACKHOLE_BQ:
{
StgPtr end;
......
......@@ -109,18 +109,6 @@ __stg_gc_enter_1
GC_GENERIC
}
#ifdef SMP
stg_gc_enter_1_hponly
{
Sp_adj(-1);
Sp(0) = R1;
R1 = HeapOverflow;
SAVE_THREAD_STATE();
TSO_what_next(CurrentTSO) = ThreadRunGHC::I16;
jump StgReturn;
}
#endif
#if defined(GRAN)
/*
ToDo: merge the block and yield macros, calling something like BLOCK(N)
......
......@@ -180,7 +180,6 @@ processHeapClosureForDead( StgClosure *c )
case FUN_1_1:
case FUN_0_2:
case BLACKHOLE_BQ:
case BLACKHOLE:
case SE_BLACKHOLE:
case CAF_BLACKHOLE:
......
......@@ -528,7 +528,6 @@ typedef struct _RtsSymbolVal {
SymX(stable_ptr_table) \
SymX(stackOverflow) \
SymX(stg_CAF_BLACKHOLE_info) \
SymX(stg_BLACKHOLE_BQ_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
SymX(stg_EMPTY_MVAR_info) \
......
......@@ -290,21 +290,13 @@ printClosure( StgClosure *obj )
}
case CAF_BLACKHOLE:
debugBelch("CAF_BH(");
printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
debugBelch(")\n");
debugBelch("CAF_BH");
break;
case BLACKHOLE:
debugBelch("BH\n");
break;
case BLACKHOLE_BQ:
debugBelch("BQ(");
printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
debugBelch(")\n");
break;
case SE_BLACKHOLE:
debugBelch("SE_BH\n");
break;
......
......@@ -150,7 +150,6 @@ static char *type_names[] = {
, "STOP_FRAME"
, "BLACKHOLE"
, "BLACKHOLE_BQ"
, "MVAR"
, "ARR_WORDS"
......@@ -878,7 +877,6 @@ heapCensusChain( Census *census, bdescr *bd )
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
case BLACKHOLE_BQ:
case CONSTR_INTLIKE:
case CONSTR_CHARLIKE:
case FUN_1_0:
......
......@@ -466,11 +466,6 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
case MUT_VAR:
*first_child = ((StgMutVar *)c)->var;
return;
case BLACKHOLE_BQ:
// blocking_queue must be TSO and the head of a linked list of TSOs.
// Shoule it be a child? Seems to be yes.
*first_child = (StgClosure *)((StgBlockingQueue *)c)->blocking_queue;
return;