Commit 5a4ec937 authored by simonmar's avatar simonmar

[project @ 2000-08-25 13:12:07 by simonmar]

Change the way threadDelay# is implemented.

We now use a list of sleeping threads sorted in increasing order by
the time at which they will wake up.  This avoids having to
traverse the entire queue on each context switch.
parent 944c6afd
/* -----------------------------------------------------------------------------
* $Id: TSO.h,v 1.17 2000/08/15 14:18:43 simonmar Exp $
* $Id: TSO.h,v 1.18 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
......@@ -134,11 +134,7 @@ typedef union {
StgClosure *closure;
struct StgTSO_ *tso;
int fd;
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
unsigned int delay;
#else
unsigned int target;
#endif
} StgTSOBlockInfo;
/*
......
/* -----------------------------------------------------------------------------
* $Id: Itimer.c,v 1.16 2000/08/03 11:28:35 simonmar Exp $
* $Id: Itimer.c,v 1.17 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team, 1995-1999
*
......@@ -80,8 +80,10 @@ handle_tick(int unused STG_UNUSED)
handleProfTick();
#endif
/* For threadDelay etc., see Select.c */
ticks_since_select++;
/* so we can get a rough indication of the current time at any point
* without having to call gettimeofday() (see Select.c):
*/
ticks_since_timestamp++;
ticks_to_ctxt_switch--;
if (ticks_to_ctxt_switch <= 0) {
......@@ -156,6 +158,9 @@ initialize_virtual_timer(nat ms)
# else
struct itimerval it;
timestamp = getourtimeofday();
ticks_since_timestamp = 0;
it.it_value.tv_sec = ms / 1000;
it.it_value.tv_usec = 1000 * (ms - (1000 * it.it_value.tv_sec));
it.it_interval = it.it_value;
......@@ -174,6 +179,9 @@ initialize_virtual_timer(nat ms)
struct itimerspec it;
timer_t tid;
timestamp = getourtimeofday();
ticks_since_timestamp = 0;
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = SIGVTALRM;
se.sigev_value.sival_int = SIGVTALRM;
......@@ -232,12 +240,14 @@ unblock_vtalrm_signal(void)
}
#endif
#if !defined(HAVE_SETITIMER) && !defined(mingw32_TARGET_OS)
/* gettimeofday() takes around 1us on our 500MHz PIII. Since we're
* only calling it 50 times/s, it shouldn't have any great impact.
*/
/* Return the current time expressed in ticks (TICK_FREQUENCY ticks per
 * second), derived from gettimeofday().
 *
 * NOTE(review): tv_sec * TICK_FREQUENCY can wrap a 32-bit value; the
 * callers in Select.c rely on the wrap-around-safe signed-difference
 * comparison, so this is presumably benign -- confirm.
 */
unsigned int
getourtimeofday(void)
{
  struct timeval tv;
  gettimeofday(&tv, (struct timezone *) NULL);
  /* Fix: the flattened diff left the stale pre-change statement
   * `return (tv.tv_sec * 1000000 + tv.tv_usec);` before this one,
   * making the tick-based conversion unreachable; removed it. */
  return (tv.tv_sec * TICK_FREQUENCY +
          tv.tv_usec * TICK_FREQUENCY / 1000000);
}
#endif
/* -----------------------------------------------------------------------------
* $Id: Itimer.h,v 1.6 2000/08/03 11:28:35 simonmar Exp $
* $Id: Itimer.h,v 1.7 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
......@@ -17,6 +17,9 @@
extern rtsBool do_prof_ticks; /* profiling ticks on/off */
/* Total number of ticks since startup */
extern lnat total_ticks;
nat initialize_virtual_timer ( nat ms );
int install_vtalrm_handler ( void );
void block_vtalrm_signal ( void );
......
/* -----------------------------------------------------------------------------
* $Id: PrimOps.hc,v 1.53 2000/08/07 23:37:23 qrczak Exp $
* $Id: PrimOps.hc,v 1.54 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team, 1998-2000
*
......@@ -1049,6 +1049,8 @@ FN_(waitWritezh_fast)
FN_(delayzh_fast)
{
StgTSO *t, *prev;
nat target;
FB_
/* args: R1.i */
ASSERT(CurrentTSO->why_blocked == NotBlocked);
......@@ -1056,20 +1058,26 @@ FN_(delayzh_fast)
ACQUIRE_LOCK(&sched_mutex);
/* Add on ticks_since_select, since these will be subtracted at
* the next awaitEvent call.
*/
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
CurrentTSO->block_info.delay = R1.i + ticks_since_select;
#else
CurrentTSO->block_info.target = R1.i + getourtimeofday();
#endif
target = (R1.i / (TICK_MILLISECS*1000)) + timestamp + ticks_since_timestamp;
CurrentTSO->block_info.target = target;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
/* Insert the new thread in the sleeping queue. */
prev = NULL;
t = sleeping_queue;
while (t != END_TSO_QUEUE && t->block_info.target < target) {
prev = t;
t = t->link;
}
CurrentTSO->link = t;
if (prev == NULL) {
sleeping_queue = CurrentTSO;
} else {
prev->link = CurrentTSO;
}
RELEASE_LOCK(&sched_mutex);
JMP_(stg_block_noregs);
FE_
}
/* ---------------------------------------------------------------------------
* $Id: Schedule.c,v 1.77 2000/08/23 12:51:03 simonmar Exp $
* $Id: Schedule.c,v 1.78 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team, 1998-2000
*
......@@ -144,6 +144,7 @@ StgTSO *ccalling_threadss[MAX_PROC];
StgTSO *run_queue_hd, *run_queue_tl;
StgTSO *blocked_queue_hd, *blocked_queue_tl;
StgTSO *sleeping_queue; /* perhaps replace with a hash table? */
#endif
......@@ -379,14 +380,7 @@ schedule( void )
*/
if (interrupted) {
IF_DEBUG(scheduler, sched_belch("interrupted"));
for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
deleteThread(t);
}
for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
deleteThread(t);
}
run_queue_hd = run_queue_tl = END_TSO_QUEUE;
blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
deleteAllThreads();
interrupted = rtsFalse;
was_interrupted = rtsTrue;
}
......@@ -506,7 +500,7 @@ schedule( void )
* ToDo: what if another client comes along & requests another
* main thread?
*/
if (blocked_queue_hd != END_TSO_QUEUE) {
if (blocked_queue_hd != END_TSO_QUEUE || sleeping_queue != END_TSO_QUEUE) {
awaitEvent(
(run_queue_hd == END_TSO_QUEUE)
#ifdef SMP
......@@ -538,6 +532,7 @@ schedule( void )
#ifdef SMP
if (blocked_queue_hd == END_TSO_QUEUE
&& run_queue_hd == END_TSO_QUEUE
&& sleeping_queue == END_TSO_QUEUE
&& (n_free_capabilities == RtsFlags.ParFlags.nNodes))
{
IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
......@@ -554,7 +549,8 @@ schedule( void )
}
#else /* ! SMP */
if (blocked_queue_hd == END_TSO_QUEUE
&& run_queue_hd == END_TSO_QUEUE)
&& run_queue_hd == END_TSO_QUEUE
&& sleeping_queue == END_TSO_QUEUE)
{
IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
detectBlackHoles();
......@@ -858,7 +854,8 @@ schedule( void )
*/
if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
&& (run_queue_hd != END_TSO_QUEUE
|| blocked_queue_hd != END_TSO_QUEUE))
|| blocked_queue_hd != END_TSO_QUEUE
|| sleeping_queue != END_TSO_QUEUE))
context_switch = 1;
else
context_switch = 0;
......@@ -1152,19 +1149,29 @@ schedule( void )
} /* end of while(1) */
}
/* A hack for Hugs concurrency support. Needs sanitisation (?) */
/* ---------------------------------------------------------------------------
* deleteAllThreads(): kill all the live threads.
*
* This is used when we catch a user interrupt (^C), before performing
* any necessary cleanups and running finalizers.
* ------------------------------------------------------------------------- */
void deleteAllThreads ( void )
{
  StgTSO* t;
  /* Fix: flattened diff duplicated the debug belch (old and new message)
   * and doubled each deleteThread(t) call; keep the post-commit message
   * and one delete per thread. */
  IF_DEBUG(scheduler,sched_belch("deleting all threads"));
  /* Kill everything on the run queue... */
  for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
      deleteThread(t);
  }
  /* ...the blocked queue... */
  for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
      deleteThread(t);
  }
  /* ...and the sleeping queue. */
  for (t = sleeping_queue; t != END_TSO_QUEUE; t = t->link) {
      deleteThread(t);
  }
  /* NOTE(review): t->link is read after deleteThread(t); this is safe
   * only if deleteThread does not free the TSO's storage (it is
   * garbage-collected) -- confirm. */
  run_queue_hd = run_queue_tl = END_TSO_QUEUE;
  blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
  sleeping_queue = END_TSO_QUEUE;
}
/* startThread and insertThread are now in GranSim.c -- HWL */
......@@ -1582,12 +1589,14 @@ initScheduler(void)
blocked_queue_hds[i] = END_TSO_QUEUE;
blocked_queue_tls[i] = END_TSO_QUEUE;
ccalling_threadss[i] = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
}
#else
run_queue_hd = END_TSO_QUEUE;
run_queue_tl = END_TSO_QUEUE;
blocked_queue_hd = END_TSO_QUEUE;
blocked_queue_tl = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
#endif
suspended_ccalling_threads = END_TSO_QUEUE;
......@@ -1743,6 +1752,8 @@ howManyThreadsAvail ( void )
i++;
for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link)
i++;
for (q = sleeping_queue; q != END_TSO_QUEUE; q = q->link)
i++;
return i;
}
......@@ -1756,9 +1767,13 @@ finishAllThreads ( void )
while (blocked_queue_hd != END_TSO_QUEUE) {
waitThread ( blocked_queue_hd, NULL );
}
while (sleeping_queue != END_TSO_QUEUE) {
    /* Fix: was waitThread(blocked_queue_hd, NULL) -- copy/paste error
     * from the loop above; we must wait on the sleeping thread itself,
     * otherwise this loop waits on the wrong TSO (or END_TSO_QUEUE)
     * when only sleeping threads remain. */
    waitThread ( sleeping_queue, NULL );
}
} while
(blocked_queue_hd != END_TSO_QUEUE ||
run_queue_hd != END_TSO_QUEUE);
run_queue_hd != END_TSO_QUEUE ||
sleeping_queue != END_TSO_QUEUE);
}
SchedulerStatus
......@@ -1924,6 +1939,7 @@ take_off_run_queue(StgTSO *tso) {
- all the threads on the runnable queue
- all the threads on the blocked queue
- all the threads on the sleeping queue
- all the thread currently executing a _ccall_GC
- all the "main threads"
......@@ -1970,6 +1986,10 @@ static void GetRoots(void)
blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
}
if (sleeping_queue != END_TSO_QUEUE) {
sleeping_queue = (StgTSO *)MarkRoot((StgClosure *)sleeping_queue);
}
#endif
for (m = main_threads; m != NULL; m = m->link) {
......@@ -2128,8 +2148,6 @@ threadStackOverflow(StgTSO *tso)
Wake up a queue that was blocked on some resource.
------------------------------------------------------------------------ */
/* ToDo: check push_on_run_queue vs. PUSH_ON_RUN_QUEUE */
#if defined(GRAN)
static inline void
unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
......@@ -2500,7 +2518,6 @@ unblockThread(StgTSO *tso)
barf("unblockThread (Exception): TSO not found");
}
case BlockedOnDelay:
case BlockedOnRead:
case BlockedOnWrite:
{
......@@ -2525,6 +2542,23 @@ unblockThread(StgTSO *tso)
barf("unblockThread (I/O): TSO not found");
}
case BlockedOnDelay:
{
    /* Unlink tso from the (singly-linked, sorted) sleeping queue. */
    StgBlockingQueueElement *prev = NULL;
    for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
         prev = t, t = t->link) {
        if (t == (StgBlockingQueueElement *)tso) {
            if (prev == NULL) {
                sleeping_queue = (StgTSO *)t->link;
            } else {
                prev->link = t->link;
            }
            goto done;
        }
    }
    /* Fix: message said "(I/O)" -- copy/pasted from the I/O case. */
    barf("unblockThread (delay): TSO not found");
}
default:
barf("unblockThread");
}
......@@ -2603,7 +2637,6 @@ unblockThread(StgTSO *tso)
barf("unblockThread (Exception): TSO not found");
}
case BlockedOnDelay:
case BlockedOnRead:
case BlockedOnWrite:
{
......@@ -2628,6 +2661,23 @@ unblockThread(StgTSO *tso)
barf("unblockThread (I/O): TSO not found");
}
case BlockedOnDelay:
{
    /* Unlink tso from the (singly-linked, sorted) sleeping queue. */
    StgTSO *prev = NULL;
    for (t = sleeping_queue; t != END_TSO_QUEUE;
         prev = t, t = t->link) {
        if (t == tso) {
            if (prev == NULL) {
                sleeping_queue = t->link;
            } else {
                prev->link = t->link;
            }
            goto done;
        }
    }
    /* Fix: message said "(I/O)" -- copy/pasted from the I/O case. */
    barf("unblockThread (delay): TSO not found");
}
default:
barf("unblockThread");
}
......@@ -2864,7 +2914,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
tso->su = (StgUpdateFrame *)(sp+1);
tso->sp = sp;
return;
default:
barf("raiseAsync");
}
......@@ -2986,12 +3036,7 @@ printThreadBlockage(StgTSO *tso)
fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
break;
case BlockedOnDelay:
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
#else
fprintf(stderr,"blocked on delay of %d ms",
tso->block_info.target - getourtimeofday());
#endif
fprintf(stderr,"blocked until %d", tso->block_info.target);
break;
case BlockedOnMVar:
fprintf(stderr,"blocked on an MVar");
......
/* -----------------------------------------------------------------------------
* $Id: Schedule.h,v 1.18 2000/04/14 15:18:07 sewardj Exp $
* $Id: Schedule.h,v 1.19 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
......@@ -85,6 +85,15 @@ void raiseAsync(StgTSO *tso, StgClosure *exception);
*/
void awaitEvent(rtsBool wait); /* In Select.c */
/* wakeUpSleepingThreads(nat ticks)
*
* Wakes up any sleeping threads whose timers have expired.
*
* Called from STG : NO
* Locks assumed : sched_mutex
*/
rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
// ToDo: check whether all fcts below are used in the SMP version, too
//@cindex awaken_blocked_queue
#if defined(GRAN)
......@@ -112,7 +121,9 @@ void initThread(StgTSO *tso, nat stack_size);
extern nat context_switch;
extern rtsBool interrupted;
extern nat ticks_since_select;
/* In Select.c */
extern nat timestamp;
extern nat ticks_since_timestamp;
//@cindex Capability
/* Capability type
......@@ -139,6 +150,7 @@ extern Capability MainRegTable;
#else
extern StgTSO *run_queue_hd, *run_queue_tl;
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif
/* Linked list of all threads. */
extern StgTSO *all_threads;
......
/* -----------------------------------------------------------------------------
* $Id: Select.c,v 1.13 2000/08/23 12:51:03 simonmar Exp $
* $Id: Select.c,v 1.14 2000/08/25 13:12:07 simonmar Exp $
*
* (c) The GHC Team 1995-1999
*
......@@ -25,7 +25,44 @@
# include <sys/time.h>
# endif
nat ticks_since_select = 0;
/* last timestamp */
nat timestamp = 0;
/* keep track of the number of ticks since we last called
* gettimeofday(), to avoid having to call it every time we need
* a timestamp.
*/
nat ticks_since_timestamp = 0;
/* There's a clever trick here to avoid problems when the time wraps
 * around.  Since our maximum delay is smaller than 31 bits of ticks
 * (it's actually 31 bits of microseconds), we can safely check
 * whether a timer has expired even if our timer will wrap around
 * before the target is reached, using the following formula:
 *
 * (int)((uint)current_time - (uint)target_time) > 0
 *
 * if this is true, the target time has passed and the timer has
 * expired (this matches the test in wakeUpSleepingThreads below).
 * (idea due to Andy Gill).
 */
/* Wake up every thread on the sleeping queue whose wake-up time has
 * passed.  The queue is kept sorted by increasing target time, so we
 * only ever need to inspect the front element.
 *
 * ticks   -- the current time, in ticks (see getourtimeofday()).
 * Returns rtsTrue iff at least one thread was woken.
 *
 * Locks assumed held: sched_mutex (per Schedule.h).
 */
rtsBool
wakeUpSleepingThreads(nat ticks)
{
StgTSO *tso;
rtsBool flag = rtsFalse;
/* (int)(ticks - target) > 0 is the wrap-around-safe "ticks is later
 * than target" test described in the comment above. */
while (sleeping_queue != END_TSO_QUEUE &&
(int)(ticks - sleeping_queue->block_info.target) > 0) {
/* Pop the expired thread off the front and make it runnable. */
tso = sleeping_queue;
sleeping_queue = tso->link;
tso->why_blocked = NotBlocked;
tso->link = END_TSO_QUEUE;
IF_DEBUG(scheduler,belch("Waking up sleeping thread %d\n", tso->id));
PUSH_ON_RUN_QUEUE(tso);
flag = rtsTrue;
}
return flag;
}
/* Argument 'wait' says whether to wait for I/O to become available,
* or whether to just check and return immediately. If there are
......@@ -50,16 +87,21 @@ awaitEvent(rtsBool wait)
rtsBool ready;
fd_set rfd,wfd;
int numFound;
nat min, delta;
int maxfd = -1;
rtsBool select_succeeded = rtsTrue;
struct timeval tv;
#ifndef linux_TARGET_OS
struct timeval tv_before,tv_after;
#endif
lnat min, ticks;
tv.tv_sec = 0;
tv.tv_usec = 0;
IF_DEBUG(scheduler,belch("Checking for threads blocked on I/O...\n"));
IF_DEBUG(scheduler,
belch("scheduler: checking for threads blocked on I/O");
if (wait) {
belch(" (waiting)");
}
belch("\n");
);
/* loop until we've woken up some threads. This loop is needed
* because the select timing isn't accurate, we sometimes sleep
......@@ -68,18 +110,23 @@ awaitEvent(rtsBool wait)
*/
do {
/* see how long it's been since we last checked the blocked queue.
* ToDo: make this check atomic, so we don't lose any ticks.
*/
delta = ticks_since_select;
ticks_since_select = 0;
delta = delta * TICK_MILLISECS * 1000;
ticks = timestamp = getourtimeofday();
ticks_since_timestamp = 0;
if (wakeUpSleepingThreads(ticks)) {
return;
}
min = wait == rtsTrue ? 0x7fffffff : 0;
if (!wait) {
min = 0;
} else if (sleeping_queue != END_TSO_QUEUE) {
min = (sleeping_queue->block_info.target - ticks)
* TICK_MILLISECS * 1000;
} else {
min = 0x7ffffff;
}
/*
* Collect all of the fd's that we're interested in, and capture
* the minimum waiting time (in microseconds) for the delayed threads.
* Collect all of the fd's that we're interested in
*/
FD_ZERO(&rfd);
FD_ZERO(&wfd);
......@@ -104,23 +151,6 @@ awaitEvent(rtsBool wait)
continue;
}
case BlockedOnDelay:
{
int candidate; /* signed int is intentional */
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
candidate = tso->block_info.delay;
#else
candidate = tso->block_info.target - getourtimeofday();
if (candidate < 0) {
candidate = 0;
}
#endif
if ((nat)candidate < min) {
min = candidate;
}
continue;
}
default:
barf("AwaitEvent");
}
......@@ -141,139 +171,92 @@ awaitEvent(rtsBool wait)
RELEASE_LOCK(&sched_mutex);
/* Check for any interesting events */
tv.tv_sec = min / 1000000;
tv.tv_sec = min / 1000000;
tv.tv_usec = min % 1000000;
#ifndef linux_TARGET_OS
gettimeofday(&tv_before, (struct timezone *) NULL);
#endif
while (!interrupted &&
(numFound = select(maxfd+1, &rfd, &wfd, NULL, &tv)) < 0) {
if (errno != EINTR) {
/* fflush(stdout); */
perror("select");
barf("select failed");
}
ACQUIRE_LOCK(&sched_mutex);
while ((numFound = select(maxfd+1, &rfd, &wfd, NULL, &tv)) < 0) {
/* We got a signal; could be one of ours. If so, we need
* to start up the signal handler straight away, otherwise
* we could block for a long time before the signal is
* serviced.
*/
if (signals_pending()) {
RELEASE_LOCK(&sched_mutex);
start_signal_handlers();
/* Don't wake up any other threads that were waiting on I/O */
select_succeeded = rtsFalse;
break;
}
if (interrupted) {
RELEASE_LOCK(&sched_mutex);
select_succeeded = rtsFalse;
break;
}
/* If new runnable threads have arrived, stop waiting for
* I/O and run them.
*/
if (run_queue_hd != END_TSO_QUEUE) {
RELEASE_LOCK(&sched_mutex);
select_succeeded = rtsFalse;
break;
}
if (errno != EINTR) {
/* fflush(stdout); */
perror("select");
barf("select failed");
}
ACQUIRE_LOCK(&sched_mutex);
RELEASE_LOCK(&sched_mutex);
}
#ifdef linux_TARGET_OS
/* on Linux, tv is set to indicate the amount of time not
* slept, so we don't need to gettimeofday() to find out.
*/
delta += min - (tv.tv_sec * 1000000 + tv.tv_usec);
#else
gettimeofday(&tv_after, (struct timezone *) NULL);
delta += (tv_after.tv_sec - tv_before.tv_sec) * 1000000 +
tv_after.tv_usec - tv_before.tv_usec;
#endif
#if 0
if (delta != 0) { fprintf(stderr,"waited: %d %d %d\n", min, delta,
interrupted); }
#endif
/* We got a signal; could be one of ours. If so, we need
* to start up the signal handler straight away, otherwise
* we could block for a long time before the signal is
* serviced.
*/
if (signals_pending()) {
RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
start_signal_handlers();
ACQUIRE_LOCK(&sched_mutex);
return; /* still hold the lock */
}
/* we were interrupted, return to the scheduler immediately.
*/
if (interrupted) {
return; /* still hold the lock */
}
/* check for threads that need waking up
*/
wakeUpSleepingThreads(getourtimeofday());
/* If new runnable threads have arrived, stop waiting for
* I/O and run them.
*/
if (run_queue_hd != END_TSO_QUEUE) {
return; /* still hold the lock */
}
RELEASE_LOCK(&sched_mutex);
}
ACQUIRE_LOCK(&sched_mutex);
/* Step through the waiting queue, unblocking every thread that now has
* a file descriptor in a ready state.
* For the delayed threads, decrement the number of microsecs
* we've been blocked for. Unblock the threads that have thusly expired.
*/
prev = NULL;
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
next = tso->link;
switch (tso->why_blocked) {
case BlockedOnRead:
ready = select_succeeded && FD_ISSET(tso->block_info.fd, &rfd);
break;
case BlockedOnWrite:
ready = select_succeeded && FD_ISSET(tso->block_info.fd, &wfd);
break;
case BlockedOnDelay:
{
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
if (tso->block_info.delay > delta) {
tso->block_info.delay -= delta;
ready = 0;
} else {
tso->block_info.delay = 0;
ready = 1;
}
#else
int candidate; /* signed int is intentional */
candidate = tso->block_info.target - getourtimeofday();
if (candidate < 0) {
candidate = 0;
}
if ((nat)candidate > delta) {
ready = 0;
} else {
ready = 1;
}
#endif
break;
}
default:
barf("awaitEvent");
}
if (select_succeeded) {
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
next = tso->link;
switch (tso->why_blocked) {
case BlockedOnRead:
ready = FD_ISSET(tso->block_info.fd, &rfd);
break;
case BlockedOnWrite:
ready = FD_ISSET(tso->block_info.fd, &wfd);
break;
default:
barf("awaitEvent");
}
if (ready) {
IF_DEBUG(scheduler,belch("Waking up thread %d\n", tso->id));
tso->why_blocked = NotBlocked;
tso->link = END_TSO_QUEUE;
PUSH_ON_RUN_QUEUE(tso);
} else {
if (prev == NULL)
blocked_queue_hd = tso;
else
prev->link = tso;
prev = tso;
}