Add a write barrier to the TSO link field (#1589)

parent 9de1ad50
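A minimal sketch of the calling convention this patch establishes, distilled from the queue code changed below; it assumes only the setTSOLink() declaration and the END_TSO_QUEUE closure that appear in this diff, and exampleEnqueue is a hypothetical name, not code from the patch:

/* Append tso to a queue whose current tail is queue_tail. */
static void
exampleEnqueue (Capability *cap, StgTSO *queue_tail, StgTSO *tso)
{
    /* Route the mutation through the barrier: setTSOLink() is expected to
     * record queue_tail for the GC when it lives in an old generation, so a
     * minor collection still finds the (possibly young) tso it now points to. */
    setTSOLink(cap, queue_tail, tso);

    /* Writing the end-of-queue marker needs no barrier: END_TSO_QUEUE is a
     * static closure, so this write cannot create an old-to-young pointer. */
    tso->_link = END_TSO_QUEUE;
}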
......@@ -544,9 +544,6 @@
#define END_TSO_QUEUE stg_END_TSO_QUEUE_closure
#define END_INVARIANT_CHECK_QUEUE stg_END_INVARIANT_CHECK_QUEUE_closure
#define dirtyTSO(tso) \
StgTSO_flags(tso) = StgTSO_flags(tso) | TSO_DIRTY::I32;
#define recordMutableCap(p, gen, regs) \
W_ __bd; \
W_ mut_list; \
......
......@@ -260,6 +260,11 @@
#define TSO_INTERRUPTIBLE 8
#define TSO_STOPPED_ON_BREAKPOINT 16
/*
* TSO_LINK_DIRTY is set when a TSO's link field is modified
*/
#define TSO_LINK_DIRTY 32
/* -----------------------------------------------------------------------------
RET_DYN stack frames
-------------------------------------------------------------------------- */
......
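A minimal sketch of how a generational collector can exploit the two dirty flags when it walks a mutable list; scavengeTSO() and evacuate() are assumed helper names whose exact signatures may differ in this tree, and this is not the GC code from the patch:

/* Flag-directed scavenging of a TSO found on a mutable list (sketch only).
 * TSO_LINK_DIRTY lets the GC trace just the _link field instead of the
 * whole thread, stack included, which is the point of the finer flag. */
static void
scavenge_tso_from_mut_list (StgTSO *tso)
{
    if (tso->flags & TSO_DIRTY) {
        /* anything may have changed since the last GC */
        scavengeTSO(tso);
    } else if (tso->flags & TSO_LINK_DIRTY) {
        /* only the link field was written */
        tso->_link = (StgTSO *)evacuate((StgClosure *)tso->_link);
    }
}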
......@@ -126,6 +126,4 @@ extern void revertCAFs( void );
extern void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
extern void dirty_MVAR(StgRegTable *reg, StgClosure *p);
extern void dirty_TSO(StgClosure *tso);
#endif /* RTSEXTERNAL_H */
......@@ -124,7 +124,21 @@ typedef union {
typedef struct StgTSO_ {
StgHeader header;
struct StgTSO_* link; /* Links threads onto blocking queues */
/* The link field, for linking threads together in lists (e.g. the
run queue on a Capability).
*/
struct StgTSO_* _link;
/*
NOTE!!! Do not modify _link directly; it is subject to
a write barrier for generational GC. Instead, use the
setTSOLink() function. Exceptions to this rule are:
* setting the link field to END_TSO_QUEUE
* putting a TSO on the blackhole_queue
* setting the link field of the currently running TSO, as it
will already be dirty.
*/
struct StgTSO_* global_link; /* Links all threads together */
StgWord16 what_next; /* Values defined in Constants.h */
......@@ -171,6 +185,13 @@ typedef struct StgTSO_ {
StgWord stack[FLEXIBLE_ARRAY];
} StgTSO;
/* -----------------------------------------------------------------------------
functions
-------------------------------------------------------------------------- */
extern void dirty_TSO (Capability *cap, StgTSO *tso);
extern void setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target);
/* -----------------------------------------------------------------------------
Invariants:
......
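The definitions of setTSOLink() and dirty_TSO() are not shown in this excerpt; a minimal sketch of the barrier, assuming the block descriptor's gen_no field and a placeholder for the RTS's mutable-list recording:

/* Placeholder: stands in for however the RTS records a mutated closure on
 * the capability's mutable list; not a real API from this patch. */
static void recordTSOMutated (Capability *cap, StgTSO *tso);

/* Sketch: record the TSO at most once, the first time its link field is
 * dirtied, and only if it lives in an old generation. */
void
setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
{
    if ((tso->flags & (TSO_DIRTY | TSO_LINK_DIRTY)) == 0) {
        tso->flags |= TSO_LINK_DIRTY;
        if (Bdescr((StgPtr)tso)->gen_no > 0) {   /* old generation? */
            recordTSOMutated(cap, tso);
        }
    }
    tso->_link = target;
}

/* Sketch: the same idea for arbitrary mutation of the TSO (stack, etc.). */
void
dirty_TSO (Capability *cap, StgTSO *tso)
{
    if ((tso->flags & (TSO_DIRTY | TSO_LINK_DIRTY)) == 0) {
        if (Bdescr((StgPtr)tso)->gen_no > 0) {
            recordTSOMutated(cap, tso);
        }
    }
    tso->flags |= TSO_DIRTY;
}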
......@@ -275,7 +275,7 @@ main(int argc, char *argv[])
closure_field(StgArrWords, words);
closure_payload(StgArrWords, payload);
closure_field(StgTSO, link);
closure_field(StgTSO, _link);
closure_field(StgTSO, global_link);
closure_field(StgTSO, what_next);
closure_field(StgTSO, why_blocked);
......
......@@ -1561,9 +1561,10 @@ takeMVarzh_fast
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
CurrentTSO);
}
StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
......@@ -1584,15 +1585,18 @@ takeMVarzh_fast
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
dirtyTSO(tso);
if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
foreign "C" dirty_TSO(MyCapability(), tso);
}
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
StgMVar_head(mvar) = tso;
#else
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
StgMVar_head(mvar) "ptr") [];
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
......@@ -1664,15 +1668,17 @@ tryTakeMVarzh_fast
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
dirtyTSO(tso);
if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
foreign "C" dirty_TSO(MyCapability(), tso);
}
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
StgMVar_head(mvar) "ptr") [];
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
......@@ -1721,9 +1727,10 @@ putMVarzh_fast
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
CurrentTSO);
}
StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
......@@ -1740,14 +1747,17 @@ putMVarzh_fast
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
dirtyTSO(tso);
if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
foreign "C" dirty_TSO(MyCapability(), tso);
}
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
......@@ -1812,14 +1822,17 @@ tryPutMVarzh_fast
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
dirtyTSO(tso);
if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
foreign "C" dirty_TSO(MyCapability(), tso);
}
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
......@@ -2037,11 +2050,11 @@ for2:
* macro in Schedule.h).
*/
#define APPEND_TO_BLOCKED_QUEUE(tso) \
ASSERT(StgTSO_link(tso) == END_TSO_QUEUE); \
ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
StgTSO_link(W_[blocked_queue_tl]) = tso; \
foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl], tso); \
} \
W_[blocked_queue_tl] = tso;
......@@ -2137,15 +2150,15 @@ delayzh_fast
while:
if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
prev = t;
t = StgTSO_link(t);
t = StgTSO__link(t);
goto while;
}
StgTSO_link(CurrentTSO) = t;
StgTSO__link(CurrentTSO) = t;
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
StgTSO_link(prev) = CurrentTSO;
foreign "C" setTSOLink(MyCapability() "ptr", prev, CurrentTSO) [];
}
jump stg_block_noregs;
#endif
......
......@@ -30,7 +30,7 @@ static void raiseAsync (Capability *cap,
static void removeFromQueues(Capability *cap, StgTSO *tso);
static void blockedThrowTo (StgTSO *source, StgTSO *target);
static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);
static void performBlockedException (Capability *cap,
StgTSO *source, StgTSO *target);
......@@ -152,7 +152,7 @@ throwTo (Capability *cap, // the Capability we hold
// follow ThreadRelocated links in the target first
while (target->what_next == ThreadRelocated) {
target = target->link;
target = target->_link;
// No, it might be a WHITEHOLE:
// ASSERT(get_itbl(target)->type == TSO);
}
......@@ -261,10 +261,10 @@ check_target:
// just moved this TSO.
if (target->what_next == ThreadRelocated) {
unlockTSO(target);
target = target->link;
target = target->_link;
goto retry;
}
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
}
......@@ -294,7 +294,7 @@ check_target:
info = lockClosure((StgClosure *)mvar);
if (target->what_next == ThreadRelocated) {
target = target->link;
target = target->_link;
unlockClosure((StgClosure *)mvar,info);
goto retry;
}
......@@ -309,12 +309,12 @@ check_target:
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockClosure((StgClosure *)target);
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
unlockClosure((StgClosure *)mvar, info);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
removeThreadFromMVarQueue(mvar, target);
removeThreadFromMVarQueue(cap, mvar, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockClosure((StgClosure *)mvar, info);
......@@ -333,12 +333,12 @@ check_target:
if (target->flags & TSO_BLOCKEX) {
lockTSO(target);
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
RELEASE_LOCK(&sched_mutex);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
removeThreadFromQueue(&blackhole_queue, target);
removeThreadFromQueue(cap, &blackhole_queue, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
RELEASE_LOCK(&sched_mutex);
......@@ -373,12 +373,12 @@ check_target:
goto retry;
}
if (target->what_next == ThreadRelocated) {
target = target->link;
target = target->_link;
unlockTSO(target2);
goto retry;
}
if (target2->what_next == ThreadRelocated) {
target->block_info.tso = target2->link;
target->block_info.tso = target2->_link;
unlockTSO(target2);
goto retry;
}
......@@ -397,12 +397,12 @@ check_target:
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockTSO(target);
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
unlockTSO(target2);
*out = target;
return THROWTO_BLOCKED;
} else {
removeThreadFromQueue(&target2->blocked_exceptions, target);
removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockTSO(target2);
......@@ -419,7 +419,7 @@ check_target:
}
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
} else {
......@@ -436,7 +436,7 @@ check_target:
// thread is blocking exceptions, and block on its
// blocked_exception queue.
lockTSO(target);
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
......@@ -449,7 +449,7 @@ check_target:
#endif
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
blockedThrowTo(source,target);
blockedThrowTo(cap,source,target);
return THROWTO_BLOCKED;
} else {
removeFromQueues(cap,target);
......@@ -469,12 +469,12 @@ check_target:
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (StgTSO *source, StgTSO *target)
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
source->link = target->blocked_exceptions;
setTSOLink(cap, source, target->blocked_exceptions);
target->blocked_exceptions = source;
dirtyTSO(target); // we modified the blocked_exceptions queue
dirty_TSO(cap,target); // we modified the blocked_exceptions queue
source->block_info.tso = target;
write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
......@@ -748,11 +748,11 @@ removeFromQueues(Capability *cap, StgTSO *tso)
goto done;
case BlockedOnMVar:
removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
goto done;
case BlockedOnBlackHole:
removeThreadFromQueue(&blackhole_queue, tso);
removeThreadFromQueue(cap, &blackhole_queue, tso);
goto done;
case BlockedOnException:
......@@ -765,10 +765,10 @@ removeFromQueues(Capability *cap, StgTSO *tso)
// ASSERT(get_itbl(target)->type == TSO);
while (target->what_next == ThreadRelocated) {
target = target->link;
target = target->_link;
}
removeThreadFromQueue(&target->blocked_exceptions, tso);
removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
goto done;
}
......@@ -778,7 +778,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
#endif
removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
/* (Cooperatively) signal that the worker thread should abort
* the request.
......@@ -788,7 +788,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
goto done;
case BlockedOnDelay:
removeThreadFromQueue(&sleeping_queue, tso);
removeThreadFromQueue(cap, &sleeping_queue, tso);
goto done;
#endif
......@@ -797,7 +797,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
}
done:
tso->link = END_TSO_QUEUE;
tso->_link = END_TSO_QUEUE; // no write barrier reqd
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
appendToRunQueue(cap,tso);
......@@ -871,7 +871,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
#endif
// mark it dirty; we're about to change its stack.
dirtyTSO(tso);
dirty_TSO(cap, tso);
sp = tso->sp;
......
......@@ -1635,7 +1635,7 @@ inner_loop:
#ifdef DEBUG_RETAINER
debugBelch("ThreadRelocated encountered in retainClosure()\n");
#endif
c = (StgClosure *)((StgTSO *)c)->link;
c = (StgClosure *)((StgTSO *)c)->_link;
goto inner_loop;
}
break;
......
......@@ -652,7 +652,7 @@ checkTSO(StgTSO *tso)
StgPtr stack_end = stack + stack_size;
if (tso->what_next == ThreadRelocated) {
checkTSO(tso->link);
checkTSO(tso->_link);
return;
}
......
......@@ -592,7 +592,7 @@ run_thread:
cap->in_haskell = rtsTrue;
dirtyTSO(t);
dirty_TSO(cap,t);
#if defined(THREADED_RTS)
if (recent_activity == ACTIVITY_DONE_GC) {
......@@ -768,7 +768,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
// Check whether we have more threads on our run queue, or sparks
// in our pool, that we could hand to another Capability.
if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
&& sparkPoolSizeCap(cap) < 2) {
return;
}
......@@ -809,21 +809,21 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
if (cap->run_queue_hd != END_TSO_QUEUE) {
prev = cap->run_queue_hd;
t = prev->link;
prev->link = END_TSO_QUEUE;
t = prev->_link;
prev->_link = END_TSO_QUEUE;
for (; t != END_TSO_QUEUE; t = next) {
next = t->link;
t->link = END_TSO_QUEUE;
next = t->_link;
t->_link = END_TSO_QUEUE;
if (t->what_next == ThreadRelocated
|| t->bound == task // don't move my bound thread
|| tsoLocked(t)) { // don't move a locked thread
prev->link = t;
setTSOLink(cap, prev, t);
prev = t;
} else if (i == n_free_caps) {
pushed_to_all = rtsTrue;
i = 0;
// keep one for us
prev->link = t;
setTSOLink(cap, prev, t);
prev = t;
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
......@@ -919,7 +919,7 @@ scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
cap->run_queue_hd = cap->wakeup_queue_hd;
cap->run_queue_tl = cap->wakeup_queue_tl;
} else {
cap->run_queue_tl->link = cap->wakeup_queue_hd;
setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
cap->run_queue_tl = cap->wakeup_queue_tl;
}
cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
......@@ -1711,7 +1711,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
ASSERT(t->link == END_TSO_QUEUE);
ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
// doing stack squeezing (which can be expensive), just run the
......@@ -2019,7 +2019,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
next = t->link;
next = t->_link;
} else {
next = t->global_link;
......@@ -2182,7 +2182,7 @@ forkProcess(HsStablePtr *entry
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
next = t->link;
next = t->_link;
} else {
next = t->global_link;
// don't allow threads to catch the ThreadKilled
......@@ -2258,7 +2258,7 @@ deleteAllThreads ( Capability *cap )
debugTrace(DEBUG_sched,"deleting all threads");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
next = t->link;
next = t->_link;
} else {
next = t->global_link;
deleteThread(cap,t);
......@@ -2417,7 +2417,7 @@ resumeThread (void *task_)
tso = task->suspended_tso;
task->suspended_tso = NULL;
tso->link = END_TSO_QUEUE;
tso->_link = END_TSO_QUEUE; // no write barrier reqd
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
......@@ -2436,7 +2436,7 @@ resumeThread (void *task_)
#endif
/* We might have GC'd, mark the TSO dirty again */
dirtyTSO(tso);
dirty_TSO(cap,tso);
IF_DEBUG(sanity, checkTSO(tso));
......@@ -2786,7 +2786,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
* dead TSO's stack.
*/
tso->what_next = ThreadRelocated;
tso->link = dest;
setTSOLink(cap,tso,dest);
tso->sp = (P_)&(tso->stack[tso->stack_size]);
tso->why_blocked = NotBlocked;
......@@ -2934,8 +2934,8 @@ checkBlackHoles (Capability *cap)
*prev = t;
any_woke_up = rtsTrue;
} else {
prev = &t->link;
t = t->link;
prev = &t->_link;
t = t->_link;
}
}
......
......@@ -186,11 +186,11 @@ void print_bqe (StgBlockingQueueElement *bqe);
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
ASSERT(tso->link == END_TSO_QUEUE);
ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_hd = tso;
} else {
cap->run_queue_tl->link = tso;
setTSOLink(cap, cap->run_queue_tl, tso);
}
cap->run_queue_tl = tso;
}
......@@ -202,7 +202,7 @@ appendToRunQueue (Capability *cap, StgTSO *tso)
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
tso->link = cap->run_queue_hd;
setTSOLink(cap, tso, cap->run_queue_hd);
cap->run_queue_hd = tso;
if (cap->run_queue_tl == END_TSO_QUEUE) {
cap->run_queue_tl = tso;
......@@ -216,8 +216,8 @@ popRunQueue (Capability *cap)
{
StgTSO *t = cap->run_queue_hd;
ASSERT(t != END_TSO_QUEUE);
cap->run_queue_hd = t->link;
t->link = END_TSO_QUEUE;
cap->run_queue_hd = t->_link;
t->_link = END_TSO_QUEUE; // no write barrier req'd
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_tl = END_TSO_QUEUE;
}
......@@ -230,11 +230,11 @@ popRunQueue (Capability *cap)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
ASSERT(tso->link == END_TSO_QUEUE);
ASSERT(tso->_link == END_TSO_QUEUE);
if (blocked_queue_hd == END_TSO_QUEUE) {
blocked_queue_hd = tso;
} else {
blocked_queue_tl->link = tso;
setTSOLink(&MainCapability, blocked_queue_tl, tso);
}
blocked_queue_tl = tso;
}
......@@ -244,11 +244,11 @@ appendToBlockedQueue(StgTSO *tso)
INLINE_HEADER void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
ASSERT(tso->link == END_TSO_QUEUE);
ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
cap->wakeup_queue_hd = tso;
} else {
cap->wakeup_queue_tl->link = tso;
setTSOLink(cap, cap->wakeup_queue_tl, tso);
}
cap->wakeup_queue_tl = tso;
}
......@@ -293,11 +293,5 @@ emptyThreadQueues(Capability *cap)
#endif /* !IN_STG_CODE */
INLINE_HEADER void
dirtyTSO (StgTSO *tso)
{
tso->flags |= TSO_DIRTY;
}
#endif /* SCHEDULE_H */
......@@ -309,7 +309,7 @@ INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
#endif
/* Put ourselves on the blackhole queue */
StgTSO_link(CurrentTSO) = W_[blackhole_queue];
StgTSO__link(CurrentTSO) = W_[blackhole_queue];
W_[blackhole_queue] = CurrentTSO;