Commit a5288c55 authored by Simon Marlow

Unify event logging and debug tracing.

  - tracing facilities are now enabled with -DTRACING, and -DDEBUG
    additionally enables debug-tracing.  -DEVENTLOG has been
    removed.

  - -debug now implies -eventlog

  - events can be printed to stderr instead of being sent to the
    binary .eventlog file by adding +RTS -v (which is implied by the
    +RTS -Dx options).

  - -Dx debug messages can be sent to the binary .eventlog file
    by adding +RTS -l.  This should help debugging by reducing
    the impact of debug tracing on execution time.

  - Various debug messages that duplicated the information in events
    have been removed.
parent c51229b2
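
For orientation only, here is a minimal usage sketch. The program below (a hypothetical Main.hs, not part of this commit) and the command lines in its comments simply restate the flag behaviour described in the commit message and in the updated usage text; treat them as an illustration, not as part of the change.

-- Hypothetical Main.hs: fork a few threads so the scheduler produces
-- events worth tracing.
--
-- Build against an event-logging RTS (or -debug, which now implies -eventlog):
--   ghc -threaded -eventlog Main.hs
--
-- Binary event log, written to Main.eventlog:        ./Main +RTS -l
-- Scheduler events:                                  ./Main +RTS -ls
-- Events printed to stderr instead:                  ./Main +RTS -v
-- Time-stamped stderr tracing:                       ./Main +RTS -v -vt
-- Scheduler debug trace (needs a -debug RTS);
-- -Dx implies -v, add -l to send it to the eventlog: ./Main +RTS -Ds -l
module Main (main) where

import Control.Concurrent (forkIO, threadDelay)
import Control.Monad (forM_)

main :: IO ()
main = do
  -- a handful of threads so RUN_THREAD / STOP_THREAD events show up
  forM_ [1 .. 8 :: Int] $ \i -> forkIO (print i)
  threadDelay 200000  -- crude wait; long enough for the forked threads to finish
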
......@@ -406,8 +406,8 @@ way_details =
, "-optc-DPROFILING" ],
Way WayEventLog "l" True "RTS Event Logging"
[ "-DEVENTLOG"
, "-optc-DEVENTLOG" ],
[ "-DTRACING"
, "-optc-DTRACING" ],
Way WayTicky "t" True "Ticky-ticky Profiling"
[ "-DTICKY_TICKY"
......
......@@ -27,6 +27,12 @@
#define USING_LIBBFD 1
#endif
/* DEBUG implies TRACING
*/
#if defined(DEBUG)
#define TRACING
#endif
/* -----------------------------------------------------------------------------
Signals - supported on non-PAR versions of the runtime. See RtsSignals.h.
-------------------------------------------------------------------------- */
......
......@@ -112,11 +112,17 @@
#define EVENT_GC_END 10 /* (cap) */
#define EVENT_REQUEST_SEQ_GC 11 /* (cap) */
#define EVENT_REQUEST_PAR_GC 12 /* (cap) */
#define EVENT_CREATE_SPARK 13 /* (cap, thread) */
#define EVENT_SPARK_TO_THREAD 14 /* DEPRECATED! (cap, thread, spark_thread) */
#define EVENT_CREATE_SPARK_THREAD 15 /* (cap, thread, spark_thread) */
#define EVENT_LOG_CAP_MSG 16 /* (cap, message ...) */
#define EVENT_LOG_MSG 17 /* (message ...) */
#define EVENT_STARTUP 18 /* (num_capabilities) */
#define NUM_EVENT_TAGS 16
#define NUM_EVENT_TAGS 19
#if 0 /* DEPRECATED EVENTS: */
#define EVENT_CREATE_SPARK 13 /* (cap, thread) */
#define EVENT_SPARK_TO_THREAD 14 /* (cap, thread, spark_thread) */
#endif
/*
* Status values for EVENT_STOP_THREAD
......@@ -137,6 +143,7 @@ typedef StgWord16 EventTypeNum;
typedef StgWord64 EventTimestamp; // in nanoseconds
typedef StgWord64 EventThreadID;
typedef StgWord16 EventCapNo;
typedef StgWord16 EventPayloadSize; // variable-size events
#endif
......
......@@ -65,13 +65,12 @@ struct DEBUG_FLAGS {
rtsBool sanity; /* 'S' warning: might be expensive! */
rtsBool stable; /* 't' */
rtsBool prof; /* 'p' */
rtsBool eventlog; /* 'e' */
rtsBool linker; /* 'l' the object linker */
rtsBool apply; /* 'a' */
rtsBool stm; /* 'm' */
rtsBool squeeze; /* 'z' stack squeezing & lazy blackholing */
rtsBool hpc; /* 'c' coverage */
rtsBool timestamp; /* add timestamps to traces */
rtsBool sparks; /* 'r' */
};
struct COST_CENTRE_FLAGS {
......@@ -118,11 +117,12 @@ struct PROFILING_FLAGS {
};
#ifdef EVENTLOG
struct EVENTLOG_FLAGS {
rtsBool doEventLogging;
struct TRACE_FLAGS {
rtsBool trace_stderr;
rtsBool timestamp; /* show timestamp in stderr output */
rtsBool scheduler; /* trace scheduler events */
};
#endif
struct CONCURRENT_FLAGS {
int ctxtSwitchTime; /* in milliseconds */
......@@ -184,9 +184,7 @@ typedef struct _RTS_FLAGS {
struct DEBUG_FLAGS DebugFlags;
struct COST_CENTRE_FLAGS CcFlags;
struct PROFILING_FLAGS ProfFlags;
#ifdef EVENTLOG
struct EVENTLOG_FLAGS EventLogFlags;
#endif
struct TRACE_FLAGS TraceFlags;
struct TICKY_FLAGS TickyFlags;
#if defined(THREADED_RTS)
......
......@@ -241,7 +241,6 @@ endif
# debug_t : debugging ticky-ticky profiling
# l : event logging
# thr_l : threaded and event logging
# thr_debug_l : threaded and debugging and event logging
#
GhcRTSWays=l
......@@ -775,8 +774,7 @@ endif
#
# The ways currently defined.
#
ALL_WAYS=v p t l s mp mg debug dyn thr thr_l debug_dyn thr_dyn thr_debug_dyn thr_p thr_debug thr_debug_l debug_p thr_debug_p
USER_WAYS=a b c d e f g h j k l m n o A B
ALL_WAYS=v p t l s mp mg debug dyn thr thr_l debug_dyn thr_dyn thr_debug_dyn thr_p thr_debug debug_p thr_debug_p
#
# The following ways are currently treated specially: p t mg,
......@@ -846,10 +844,6 @@ WAY_thr_debug_HC_OPTS=-optc-DTHREADED_RTS -optc-DDEBUG
WAY_thr_debug_p_NAME=threaded debug profiling
WAY_thr_debug_p_HC_OPTS=-optc-DTHREADED_RTS -optc-DDEBUG -prof
# Way 'thr_debug_l':
WAY_thr_debug_l_NAME=threaded debug event logging
WAY_thr_debug_l_HC_OPTS=-optc-DTHREADED_RTS -optc-DDEBUG -eventlog
# Way 'dyn': build dynamic shared libraries
WAY_dyn_NAME=dyn
WAY_dyn_HC_OPTS=-fPIC -dynamic
......@@ -866,57 +860,6 @@ WAY_thr_debug_dyn_HC_OPTS=-fPIC -dynamic -optc-DTHREADED_RTS -optc-DDEBUG
WAY_debug_dyn_NAME=thr_dyn
WAY_debug_dyn_HC_OPTS=-fPIC -dynamic -optc-DDEBUG
#
# Add user-way configurations here:
#
WAY_A_NAME=
WAY_A_HC_OPTS=
WAY_B_NAME=
WAY_B_HC_OPTS=
WAY_a_NAME=
WAY_a_HC_OPTS=
WAY_b_NAME=
WAY_b_HC_OPTS=
WAY_c_NAME=
WAY_c_HC_OPTS=
WAY_d_NAME=
WAY_d_HC_OPTS=
WAY_e_NAME=
WAY_e_HC_OPTS=
WAY_f_NAME=
WAY_f_HC_OPTS=
WAY_g_NAME=
WAY_g_HC_OPTS=
WAY_h_NAME=
WAY_h_HC_OPTS=
WAY_j_NAME=
WAY_j_HC_OPTS=
WAY_k_NAME=
WAY_k_HC_OPTS=
WAY_l_NAME=
WAY_l_HC_OPTS=
WAY_m_NAME=
WAY_m_HC_OPTS=
WAY_n_NAME=
WAY_n_HC_OPTS=
WAY_o_NAME=
WAY_o_HC_OPTS=
################################################################################
#
# 31-bit-Int Core files
......
......@@ -95,7 +95,7 @@ findSpark (Capability *cap)
cap->sparks_converted++;
// Post event for running a spark from capability's own pool.
postEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO->id, 0);
traceSchedEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO, 0);
return spark;
}
......@@ -127,14 +127,10 @@ findSpark (Capability *cap)
}
if (spark != NULL) {
debugTrace(DEBUG_sched,
"cap %d: Stole a spark from capability %d",
cap->no, robbed->no);
cap->sparks_converted++;
postEvent(cap, EVENT_STEAL_SPARK,
cap->r.rCurrentTSO->id, robbed->no);
traceSchedEvent(cap, EVENT_STEAL_SPARK,
cap->r.rCurrentTSO, robbed->no);
return spark;
}
......@@ -580,10 +576,9 @@ yieldCapability (Capability** pCap, Task *task)
Capability *cap = *pCap;
if (waiting_for_gc == PENDING_GC_PAR) {
debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
postEvent(cap, EVENT_GC_START, 0, 0);
traceSchedEvent(cap, EVENT_GC_START, 0, 0);
gcWorkerThread(cap);
postEvent(cap, EVENT_GC_END, 0, 0);
traceSchedEvent(cap, EVENT_GC_END, 0, 0);
return;
}
......@@ -790,8 +785,7 @@ shutdownCapability (Capability *cap, Task *task, rtsBool safe)
continue;
}
postEvent(cap, EVENT_SHUTDOWN, 0, 0);
debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
traceSchedEvent(cap, EVENT_SHUTDOWN, 0, 0);
RELEASE_LOCK(&cap->lock);
break;
}
......@@ -880,8 +874,6 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
#endif
for (task = cap->suspended_ccalling_tasks; task != NULL;
task=task->next) {
debugTrace(DEBUG_sched,
"evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
evac(user, (StgClosure **)(void *)&task->suspended_tso);
}
......
......@@ -1051,6 +1051,15 @@ void prettyPrintClosure_ (StgClosure *obj)
}
}
char *what_next_strs[] = {
[0] = "(unknown)",
[ThreadRunGHC] = "ThreadRunGHC",
[ThreadInterpret] = "ThreadInterpret",
[ThreadKilled] = "ThreadKilled",
[ThreadRelocated] = "ThreadRelocated",
[ThreadComplete] = "ThreadComplete"
};
#else /* DEBUG */
void printPtr( StgPtr p )
{
......
......@@ -30,6 +30,8 @@ extern void printTSO ( StgTSO *tso );
extern void DEBUG_LoadSymbols( char *name );
extern const char *lookupGHCName( void *addr );
extern char *what_next_strs[];
#endif
#pragma GCC visibility pop
......
......@@ -18,7 +18,6 @@
#include "STM.h"
#include "Sanity.h"
#include "Profiling.h"
#include "eventlog/EventLog.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif
......@@ -162,11 +161,7 @@ throwTo (Capability *cap, // the Capability we hold
(unsigned long)source->id, (unsigned long)target->id);
#ifdef DEBUG
if (traceClass(DEBUG_sched)) {
debugTraceBegin("throwTo: target");
printThreadStatus(target);
debugTraceEnd();
}
traceThreadStatus(DEBUG_sched, target);
#endif
goto check_target;
......
......@@ -106,12 +106,11 @@ void initRtsFlagsDefaults(void)
RtsFlags.DebugFlags.stable = rtsFalse;
RtsFlags.DebugFlags.stm = rtsFalse;
RtsFlags.DebugFlags.prof = rtsFalse;
RtsFlags.DebugFlags.eventlog = rtsFalse;
RtsFlags.DebugFlags.apply = rtsFalse;
RtsFlags.DebugFlags.linker = rtsFalse;
RtsFlags.DebugFlags.squeeze = rtsFalse;
RtsFlags.DebugFlags.hpc = rtsFalse;
RtsFlags.DebugFlags.timestamp = rtsFalse;
RtsFlags.DebugFlags.sparks = rtsFalse;
#endif
#if defined(PROFILING)
......@@ -135,8 +134,10 @@ void initRtsFlagsDefaults(void)
RtsFlags.ProfFlags.bioSelector = NULL;
#endif
#ifdef EVENTLOG
RtsFlags.EventLogFlags.doEventLogging = rtsFalse;
#ifdef TRACING
RtsFlags.TraceFlags.trace_stderr = rtsFalse;
RtsFlags.TraceFlags.timestamp = rtsFalse;
RtsFlags.TraceFlags.scheduler = rtsFalse;
#endif
RtsFlags.MiscFlags.tickInterval = 20; /* In milliseconds */
......@@ -196,9 +197,11 @@ usage_text[] = {
" -m<n> Minimum % of heap which must be available (default 3%)",
" -G<n> Number of generations (default: 2)",
" -T<n> Number of steps in younger generations (default: 2)",
" -c<n> Auto-enable compaction of the oldest generation when live data is",
" at least <n>% of the maximum heap size set with -M (default: 30%)",
" -c Enable compaction for all major collections",
" -c<n> Use in-place compaction instead of copying in the oldest generation",
" when live data is at least <n>% of the maximum heap size set with",
" -M (default: 30%)",
" -c Use in-place compaction for all oldest generation collections",
" (the default is to use copying)",
" -w Use mark-region for the oldest generation (experimental)",
#if defined(THREADED_RTS)
" -I<sec> Perform full GC after <sec> idle time (default: 0.3, 0 == off)",
......@@ -252,9 +255,13 @@ usage_text[] = {
# endif
#endif /* PROFILING or PAR */
#ifdef EVENTLOG
#ifdef TRACING
"",
" -l Log runtime events (generates binary trace file <program>.eventlog)",
" -v Log events to stderr",
" -l Log events in binary format to the file <program>.eventlog",
" -vt Include time stamps when tracing events to stderr with -v",
"",
" -ls Log scheduler events",
"",
#endif
......@@ -275,8 +282,6 @@ usage_text[] = {
" This sets the resolution for -C and the profile timer -i.",
" Default: 0.02 sec.",
"",
" -vt Time-stamp debug messages",
"",
#if defined(DEBUG)
" -Ds DEBUG: scheduler",
" -Di DEBUG: interpreter",
......@@ -293,6 +298,10 @@ usage_text[] = {
" -Dm DEBUG: stm",
" -Dz DEBUG: stack squezing",
" -Dc DEBUG: program coverage",
" -Dr DEBUG: sparks",
"",
" NOTE: all -D options also enable -v automatically. Use -l to create a",
" binary event log file instead.",
"",
#endif /* DEBUG */
#if defined(THREADED_RTS) && !defined(NOSMP)
......@@ -472,10 +481,10 @@ errorBelch("not built for: -prof"); \
error = rtsTrue;
#endif
#ifdef EVENTLOG
# define EVENTLOG_BUILD_ONLY(x) x
#ifdef TRACING
# define TRACING_BUILD_ONLY(x) x
#else
# define EVENTLOG_BUILD_ONLY(x) \
# define TRACING_BUILD_ONLY(x) \
errorBelch("not built for: -par-prof"); \
error = rtsTrue;
#endif
......@@ -617,9 +626,6 @@ error = rtsTrue;
case 'p':
RtsFlags.DebugFlags.prof = rtsTrue;
break;
case 'e':
RtsFlags.DebugFlags.eventlog = rtsTrue;
break;
case 'l':
RtsFlags.DebugFlags.linker = rtsTrue;
break;
......@@ -635,10 +641,16 @@ error = rtsTrue;
case 'c':
RtsFlags.DebugFlags.hpc = rtsTrue;
break;
case 'r':
RtsFlags.DebugFlags.sparks = rtsTrue;
break;
default:
bad_option( rts_argv[arg] );
}
}
// -Dx also turns on -v. Use -l to direct trace
// events to the .eventlog file instead.
RtsFlags.TraceFlags.trace_stderr = rtsTrue;
break;
}
#endif
......@@ -743,8 +755,19 @@ error = rtsTrue;
/* =========== PROFILING ========================== */
case 'l':
#ifdef EVENTLOG
RtsFlags.EventLogFlags.doEventLogging = rtsTrue;
#ifdef TRACING
switch(rts_argv[arg][2]) {
case '\0':
RtsFlags.TraceFlags.trace_stderr = rtsFalse;
break;
case 's':
RtsFlags.TraceFlags.scheduler = rtsTrue;
break;
default:
errorBelch("unknown RTS option: %s",rts_argv[arg]);
error = rtsTrue;
break;
}
#else
errorBelch("not built for: -eventlog");
#endif
......@@ -1049,13 +1072,14 @@ error = rtsTrue;
case 'v':
switch(rts_argv[arg][2]) {
case '\0':
errorBelch("incomplete RTS option: %s",rts_argv[arg]);
error = rtsTrue;
break;
#ifdef TRACING
case '\0':
RtsFlags.TraceFlags.trace_stderr = rtsTrue;
break;
case 't':
RtsFlags.DebugFlags.timestamp = rtsTrue;
RtsFlags.TraceFlags.timestamp = rtsTrue;
break;
#endif
case 's':
case 'g':
// ignored for backwards-compat
......
......@@ -29,7 +29,6 @@
#include "sm/BlockAlloc.h"
#include "Trace.h"
#include "Stable.h"
#include "eventlog/EventLog.h"
#include "Hash.h"
#include "Profiling.h"
#include "Timer.h"
......@@ -146,7 +145,7 @@ hs_init(int *argc, char **argv[])
#endif
/* initTracing must be after setupRtsFlags() */
#ifdef DEBUG
#ifdef TRACING
initTracing();
#endif
......@@ -190,12 +189,6 @@ hs_init(int *argc, char **argv[])
initProfiling1();
#ifdef EVENTLOG
if (RtsFlags.EventLogFlags.doEventLogging) {
initEventLogging();
}
#endif
/* start the virtual timer 'subsystem'. */
initTimer();
startTimer();
......@@ -421,11 +414,9 @@ hs_exit_(rtsBool wait_foreign)
if (prof_file != NULL) fclose(prof_file);
#endif
#ifdef EVENTLOG
if (RtsFlags.EventLogFlags.doEventLogging) {
endEventLogging();
freeEventLogging();
}
#ifdef TRACING
endTracing();
freeTracing();
#endif
#if defined(TICKY_TICKY)
......
......@@ -26,7 +26,6 @@
#include "Proftimer.h"
#include "ProfHeap.h"
#include "Weak.h"
#include "eventlog/EventLog.h"
#include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
#include "Sparks.h"
#include "Capability.h"
......@@ -172,17 +171,6 @@ static void deleteAllThreads (Capability *cap);
static void deleteThread_(Capability *cap, StgTSO *tso);
#endif
#ifdef DEBUG
static char *whatNext_strs[] = {
[0] = "(unknown)",
[ThreadRunGHC] = "ThreadRunGHC",
[ThreadInterpret] = "ThreadInterpret",
[ThreadKilled] = "ThreadKilled",
[ThreadRelocated] = "ThreadRelocated",
[ThreadComplete] = "ThreadComplete"
};
#endif
/* -----------------------------------------------------------------------------
* Putting a thread on the run queue: different scheduling policies
* -------------------------------------------------------------------------- */
......@@ -249,9 +237,7 @@ schedule (Capability *initialCapability, Task *task)
// The sched_mutex is *NOT* held
// NB. on return, we still hold a capability.
debugTrace (DEBUG_sched,
"### NEW SCHEDULER LOOP (task: %p, cap: %p)",
task, initialCapability);
debugTrace (DEBUG_sched, "cap %d: schedule()", initialCapability->no);
schedulePreLoop();
......@@ -396,12 +382,11 @@ schedule (Capability *initialCapability, Task *task)
if (bound) {
if (bound == task) {
debugTrace(DEBUG_sched,
"### Running thread %lu in bound thread", (unsigned long)t->id);
// yes, the Haskell thread is bound to the current native thread
} else {
debugTrace(DEBUG_sched,
"### thread %lu bound to another OS thread", (unsigned long)t->id);
"thread %lu bound to another OS thread",
(unsigned long)t->id);
// no, bound to a different Haskell thread: pass to that thread
pushOnRunQueue(cap,t);
continue;
......@@ -410,7 +395,8 @@ schedule (Capability *initialCapability, Task *task)
// The thread we want to run is unbound.
if (task->tso) {
debugTrace(DEBUG_sched,
"### this OS thread cannot run thread %lu", (unsigned long)t->id);
"this OS thread cannot run thread %lu",
(unsigned long)t->id);
// no, the current native thread is bound to a different
// Haskell thread, so pass it to any worker thread
pushOnRunQueue(cap,t);
......@@ -445,9 +431,6 @@ run_thread:
// that.
cap->r.rCurrentTSO = t;
debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
(long)t->id, whatNext_strs[t->what_next]);
startHeapProfTimer();
// Check for exceptions blocked on this thread
......@@ -485,7 +468,7 @@ run_thread:
}
#endif
postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
switch (prev_what_next) {
......@@ -535,7 +518,7 @@ run_thread:
t->saved_winerror = GetLastError();
#endif
postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
......@@ -545,9 +528,6 @@ run_thread:
// that task->cap != cap. We better yield this Capability
// immediately and return to normality.
if (ret == ThreadBlocked) {
debugTrace(DEBUG_sched,
"--<< thread %lu (%s) stopped: blocked",
(unsigned long)t->id, whatNext_strs[t->what_next]);
force_yield = rtsTrue;
goto yield;
}
......@@ -798,7 +778,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
......@@ -822,7 +802,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
if (spark != NULL) {
debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
newSpark(&(free_caps[i]->r), spark);
}
......@@ -1106,7 +1086,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
debugTrace(DEBUG_sched,
"--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
(long)t->id, whatNext_strs[t->what_next], blocks);
(long)t->id, what_next_strs[t->what_next], blocks);
// don't do this if the nursery is (nearly) full, we'll GC first.
if (cap->r.rCurrentNursery->link != NULL ||
......@@ -1160,10 +1140,6 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
}
}
debugTrace(DEBUG_sched,
"--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
if (cap->r.rHpLim == NULL || cap->context_switch) {
// Sometimes we miss a context switch, e.g. when calling
// primitives in a tight loop, MAYBE_GC() doesn't check the
......@@ -1185,10 +1161,6 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
static void
scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
{
debugTrace (DEBUG_sched,
"--<< thread %ld (%s) stopped, StackOverflow",
(long)t->id, whatNext_strs[t->what_next]);
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
......@@ -1230,11 +1202,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
if (t->what_next != prev_what_next) {
debugTrace(DEBUG_sched,
"--<< thread %ld (%s) stopped to switch evaluators",
(long)t->id, whatNext_strs[t->what_next]);
} else {
debugTrace(DEBUG_sched,
"--<< thread %ld (%s) stopped, yielding",
(long)t->id, whatNext_strs[t->what_next]);
(long)t->id, what_next_strs[t->what_next]);