Commit 5a2769f0 authored by Simon Marlow's avatar Simon Marlow
Browse files

New tracing interface

A simple interface for generating trace messages with timestamps and
thread IDs attached to them.  Most debugging output goes through this
interface now, so it is straightforward to get timestamped debugging
traces with +RTS -vt.  Also, we plan to use this to generate
parallelism profiles from the trace output.
parent 3f10646c
......@@ -25,6 +25,7 @@
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
......@@ -196,8 +197,7 @@ initCapabilities( void )
initCapability(&capabilities[i], i);
}
IF_DEBUG(scheduler, sched_belch("allocated %d capabilities",
n_capabilities));
debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);
#else /* !THREADED_RTS */
......@@ -233,10 +233,10 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
ASSERT_LOCK_HELD(&cap->lock);
ASSERT(task->cap == cap);
IF_DEBUG(scheduler,
sched_belch("passing capability %d to %s %p",
cap->no, task->tso ? "bound task" : "worker",
(void *)task->id));
trace(TRACE_sched | DEBUG_sched,
"passing capability %d to %s %p",
cap->no, task->tso ? "bound task" : "worker",
(void *)task->id);
ACQUIRE_LOCK(&task->lock);
task->wakeup = rtsTrue;
// the wakeup flag is needed because signalCondition() doesn't
......@@ -291,8 +291,8 @@ releaseCapability_ (Capability* cap)
// are threads that need to be completed. If the system is
// shutting down, we never create a new worker.
if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
IF_DEBUG(scheduler,
sched_belch("starting new worker on capability %d", cap->no));
debugTrace(DEBUG_sched,
"starting new worker on capability %d", cap->no);
startWorkerTask(cap, workerStart);
return;
}
......@@ -310,7 +310,7 @@ releaseCapability_ (Capability* cap)
}
last_free_capability = cap;
IF_DEBUG(scheduler, sched_belch("freeing capability %d", cap->no));
trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
void
......@@ -396,8 +396,7 @@ waitForReturnCapability (Capability **pCap, Task *task)
ACQUIRE_LOCK(&cap->lock);
IF_DEBUG(scheduler,
sched_belch("returning; I want capability %d", cap->no));
debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);
if (!cap->running_task) {
// It's free; just grab it
......@@ -435,8 +434,7 @@ waitForReturnCapability (Capability **pCap, Task *task)
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
IF_DEBUG(scheduler,
sched_belch("returning; got capability %d", cap->no));
trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
*pCap = cap;
#endif
......@@ -455,7 +453,7 @@ yieldCapability (Capability** pCap, Task *task)
// The fast path has no locking, if we don't enter this while loop
while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));
debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
// We must now release the capability and wait to be woken up
// again.
......@@ -470,10 +468,12 @@ yieldCapability (Capability** pCap, Task *task)
task->wakeup = rtsFalse;
RELEASE_LOCK(&task->lock);
IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);
ACQUIRE_LOCK(&cap->lock);
if (cap->running_task != NULL) {
IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
debugTrace(DEBUG_sched,
"capability %d is owned by another task", cap->no);
RELEASE_LOCK(&cap->lock);
continue;
}
......@@ -495,7 +495,7 @@ yieldCapability (Capability** pCap, Task *task)
break;
}
IF_DEBUG(scheduler, sched_belch("got capability %d", cap->no));
trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
ASSERT(cap->running_task == task);
}
......@@ -527,6 +527,7 @@ wakeupThreadOnCapability (Capability *cap, StgTSO *tso)
// start it up
cap->running_task = myTask(); // precond for releaseCapability_()
trace(TRACE_sched, "resuming capability %d", cap->no);
releaseCapability_(cap);
} else {
appendToWakeupQueue(cap,tso);
......@@ -557,6 +558,7 @@ prodCapabilities(rtsBool all)
ACQUIRE_LOCK(&cap->lock);
if (!cap->running_task) {
if (cap->spare_workers) {
trace(TRACE_sched, "resuming capability %d", cap->no);
task = cap->spare_workers;
ASSERT(!task->stopped);
giveCapabilityToTask(cap,task);
......@@ -616,23 +618,25 @@ shutdownCapability (Capability *cap, Task *task)
task->cap = cap;
for (i = 0; i < 50; i++) {
IF_DEBUG(scheduler, sched_belch("shutting down capability %d, attempt %d", cap->no, i));
debugTrace(DEBUG_sched,
"shutting down capability %d, attempt %d", cap->no, i);
ACQUIRE_LOCK(&cap->lock);
if (cap->running_task) {
RELEASE_LOCK(&cap->lock);
IF_DEBUG(scheduler, sched_belch("not owner, yielding"));
debugTrace(DEBUG_sched, "not owner, yielding");
yieldThread();
continue;
}
cap->running_task = task;
if (!emptyRunQueue(cap) || cap->spare_workers) {
IF_DEBUG(scheduler, sched_belch("runnable threads or workers still alive, yielding"));
debugTrace(DEBUG_sched,
"runnable threads or workers still alive, yielding");
releaseCapability_(cap); // this will wake up a worker
RELEASE_LOCK(&cap->lock);
yieldThread();
continue;
}
IF_DEBUG(scheduler, sched_belch("capability %d is stopped.", cap->no));
debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
RELEASE_LOCK(&cap->lock);
break;
}
......
......@@ -42,7 +42,7 @@
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "Trace.h"
#include "RetainerProfile.h"
#include <string.h>
......@@ -355,10 +355,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
Now, Now));
#endif
debugTrace(DEBUG_gc, "starting GC");
#if defined(RTS_USER_SIGNALS)
// block signals
......@@ -516,8 +513,8 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
stp->bitmap = bitmap_bdescr;
bitmap = bitmap_bdescr->start;
IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
bitmap_size, bitmap););
debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
bitmap_size, bitmap);
// don't forget to fill it with zeros!
memset(bitmap, 0, bitmap_size);
......@@ -828,7 +825,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
}
copied += mut_list_size;
IF_DEBUG(gc, debugBelch("mut_list_size: %ld (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
debugTrace(DEBUG_gc,
"mut_list_size: %ld (%d vars, %d arrays, %d others)",
mut_list_size * sizeof(W_),
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
......@@ -1077,7 +1077,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
int pc_free;
adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
heapOverflow();
......@@ -1309,8 +1312,10 @@ traverse_weak_ptr_list(void)
w->link = weak_ptr_list;
weak_ptr_list = w;
flag = rtsTrue;
IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
w, w->key));
debugTrace(DEBUG_weak,
"weak pointer still alive at %p -> %p",
w, w->key);
continue;
}
else {
......@@ -2196,18 +2201,16 @@ loop:
to = copy(q,BLACKHOLE_sizeW(),stp);
//ToDo: derive size etc from reverted IP
//to = copy(q,size,stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
debugTrace(DEBUG_gc, "evacuate: RBH %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to));
return to;
}
case BLOCKED_FETCH:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgBlockedFetch),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to));
return to;
# ifdef DIST
......@@ -2216,17 +2219,15 @@ loop:
case FETCH_ME:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMe),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to));
return to;
case FETCH_ME_BQ:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to));
return to;
#endif
......@@ -3072,9 +3073,8 @@ scavenge(step *stp)
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
IF_DEBUG(gc,
debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue);
// ToDo: use size of reverted closure here!
p += BLACKHOLE_sizeW();
break;
......@@ -3089,10 +3089,9 @@ scavenge(step *stp)
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node));
p += sizeofW(StgBlockedFetch);
break;
}
......@@ -3109,9 +3108,8 @@ scavenge(step *stp)
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p));
p += sizeofW(StgFetchMeBlockingQueue);
break;
}
......@@ -3464,9 +3462,8 @@ linear_scan:
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
IF_DEBUG(gc,
debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue);
break;
}
......@@ -3479,10 +3476,9 @@ linear_scan:
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node));
break;
}
......@@ -3497,9 +3493,8 @@ linear_scan:
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p));
break;
}
#endif /* PAR */
......@@ -3574,7 +3569,7 @@ linear_scan:
// start a new linear scan if the mark stack overflowed at some point
if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
mark_stack_overflowed = rtsFalse;
oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
oldgen_scan = oldgen_scan_bd->start;
......@@ -3816,9 +3811,8 @@ scavenge_one(StgPtr p)
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
IF_DEBUG(gc,
debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue);
// ToDo: use size of reverted closure here!
break;
}
......@@ -3832,10 +3826,10 @@ scavenge_one(StgPtr p)
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
debugTrace(DEBUG_gc,
"scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node));
break;
}
......@@ -3850,9 +3844,8 @@ scavenge_one(StgPtr p)
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p));
break;
}
#endif
......@@ -4180,8 +4173,6 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
StgWord bitmap;
nat size;
//IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
/*
* Each time around this loop, we are looking at a chunk of stack
* that starts with an activation record.
......@@ -4441,11 +4432,11 @@ gcCAFs(void)
ASSERT(info->type == IND_STATIC);
if (STATIC_LINK(info,p) == NULL) {
IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
// black hole it
SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);
*pp = p;
debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
// black hole it
SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);
*pp = p;
}
else {
pp = &STATIC_LINK2(info,p);
......@@ -4455,7 +4446,7 @@ gcCAFs(void)
}
// debugBelch("%d CAFs live", i);
debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
......@@ -4650,7 +4641,9 @@ threadPaused(Capability *cap, StgTSO *tso)
bh = ((StgUpdateFrame *)frame)->updatee;
if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));
debugTrace(DEBUG_squeeze,
"suspending duplicate work: %ld words of stack",
(StgPtr)frame - tso->sp);
// If this closure is already an indirection, then
// suspend the computation up to this point:
......@@ -4710,10 +4703,10 @@ threadPaused(Capability *cap, StgTSO *tso)
}
end:
IF_DEBUG(squeeze,
debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
words_to_squeeze, weight,
weight < words_to_squeeze ? "YES" : "NO"));
debugTrace(DEBUG_squeeze,
"words_to_squeeze: %d, weight: %d, squeeze: %s",
words_to_squeeze, weight,
weight < words_to_squeeze ? "YES" : "NO");
// Should we squeeze or not? Arbitrary heuristic: we squeeze if
// the number of words we have to shift down is less than the
......@@ -4735,7 +4728,7 @@ printMutableList(generation *gen)
bdescr *bd;
StgPtr p;
debugBelch("@@ Mutable list %p: ", gen->mut_list);
debugBelch("mutable list %p: ", gen->mut_list);
for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
for (p = bd->start; p < bd->free; p++) {
......
......@@ -17,6 +17,7 @@
#include "GCCompact.h"
#include "Schedule.h"
#include "Apply.h"
#include "Trace.h"
// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
......@@ -931,12 +932,14 @@ compact( void (*get_roots)(evac_fn) )
for (s = 0; s < generations[g].n_steps; s++) {
if (g==0 && s ==0) continue;
stp = &generations[g].steps[s];
IF_DEBUG(gc, debugBelch("update_fwd: %d.%d\n", stp->gen->no, stp->no););
debugTrace(DEBUG_gc, "update_fwd: %d.%d",
stp->gen->no, stp->no);
update_fwd(stp->blocks);
update_fwd_large(stp->scavenged_large_objects);
if (g == RtsFlags.GcFlags.generations-1 && stp->old_blocks != NULL) {
IF_DEBUG(gc, debugBelch("update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no););
debugTrace(DEBUG_gc, "update_fwd: %d.%d (compact)",
stp->gen->no, stp->no);
update_fwd_compact(stp->old_blocks);
}
}
......@@ -946,9 +949,10 @@ compact( void (*get_roots)(evac_fn) )
stp = &oldest_gen->steps[0];
if (stp->old_blocks != NULL) {
blocks = update_bkwd_compact(stp);
IF_DEBUG(gc, debugBelch("update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n",
stp->gen->no, stp->no,
stp->n_old_blocks, blocks););
debugTrace(DEBUG_gc,
"update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)",
stp->gen->no, stp->no,
stp->n_old_blocks, blocks);
stp->n_old_blocks = blocks;
}
}
......@@ -16,6 +16,7 @@
#include "RtsFlags.h"
#include "MBlock.h"
#include "BlockAlloc.h"
#include "Trace.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
......@@ -287,7 +288,7 @@ getMBlocks(nat n)
// ToDo: check that we haven't already grabbed the memory at next_request
next_request = ret + size;
IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at %p\n",n,ret));
debugTrace(DEBUG_gc, "allocated %d megablock(s) at %p",n,ret);
// fill in the table
for (i = 0; i < n; i++) {
......@@ -402,7 +403,7 @@ getMBlocks(nat n)
barf("getMBlocks: unknown memory allocation failure on Win32.");
}
IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
debugTrace(DEBUG_gc, "allocated %d megablock(s) at 0x%x",n,(nat)ret);
next_request = (char*)next_request + size;
mblocks_allocated += n;
......
......@@ -353,11 +353,12 @@ CostCentreStack *
PushCostCentre ( CostCentreStack *ccs, CostCentre *cc )
#define PushCostCentre _PushCostCentre
{
IF_DEBUG(prof,
debugBelch("Pushing %s on ", cc->label);
debugCCS(ccs);
debugBelch("\n"));
return PushCostCentre(ccs,cc);
IF_DEBUG(prof,
traceBegin("pushing %s on ", cc->label);
debugCCS(ccs);
traceEnd(););
return PushCostCentre(ccs,cc);
}
#endif
......
......@@ -27,6 +27,7 @@
#include "Linker.h"
#include "ThreadLabels.h"
#include "BlockAlloc.h"
#include "Trace.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
......@@ -161,6 +162,9 @@ hs_init(int *argc, char **argv[])
setProgArgv(*argc,*argv);
}
/* initTracing must be after setupRtsFlags() */
initTracing();
#if defined(PAR)
/* NB: this really must be done after processing the RTS flags */
IF_PAR_DEBUG(verbose,
......
......@@ -90,6 +90,7 @@
#include "SMP.h"
#include "STM.h"
#include "Storage.h"
#include "Trace.h"
#include <stdlib.h>
#include <stdio.h>
......@@ -113,16 +114,7 @@
// If SHAKE is defined then validation will sometimes spuriously fail. This helps test
// unusual code paths if genuine contention is rare
#if defined(DEBUG)
#define SHAKE
#if defined(THREADED_RTS)
#define TRACE(_x...) IF_DEBUG(stm, debugBelch("STM (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()); debugBelch ( _x ))
#else
#define TRACE(_x...) IF_DEBUG(stm, debugBelch ( _x ))
#endif
#else
#define TRACE(_x...) /*Nothing*/
#endif
#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
#ifdef SHAKE
static const int do_shake = TRUE;
......
This diff is collapsed.
......@@ -314,11 +314,6 @@ emptyThreadQueues(Capability *cap)
;
}
#ifdef DEBUG
void sched_belch(char *s, ...)
GNU_ATTRIBUTE(format (printf, 1, 2));
#endif
#endif /* !IN_STG_CODE */
STATIC_INLINE void
......
......@@ -21,6 +21,7 @@
# include "GranSimRts.h"
# endif
#include "Sparks.h"
#include "Trace.h"
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
......@@ -149,19 +150,18 @@ markSparkQueue (evac_fn evac)
PAR_TICKY_MARK_SPARK_QUEUE_END(n);
#if defined(PARALLEL_HASKELL)
IF_DEBUG(scheduler,
debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks on [%x]",
n, pruned_sparks, mytid));
debugTrace(DEBUG_sched,
"marked %d sparks and pruned %d sparks on [%x]",
n, pruned_sparks, mytid);
#else
IF_DEBUG(scheduler,
debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks\n",
n, pruned_sparks));
debugTrace(DEBUG_sched,
"marked %d sparks and pruned %d sparks",
n, pruned_sparks);
#endif
IF_DEBUG(scheduler,
debugBelch("markSparkQueue: new spark queue len=%d; (hd=%p; tl=%p)\n",
sparkPoolSize(pool), pool->hd, pool->tl));
debugTrace(DEBUG_sched,
"new spark queue len=%d; (hd=%p; tl=%p)",
sparkPoolSize(pool), pool->hd, pool->tl);
}
}
......@@ -825,8 +825,9 @@ markSparkQueue(void)
// ToDo?: statistics gathering here (also for GUM!)
sp->node = (StgClosure *)MarkRoot(sp->node);
}