Commit 65aaa9b2 authored by Duncan Coutts's avatar Duncan Coutts

Add new eventlog events for various heap and GC statistics

They cover much the same info as is available via the GHC.Stats module
or via the '+RTS -s' textual output, but via the eventlog and with a
better sampling frequency.

We have three new generic heap info events and two very GHC-specific
ones. (The hope is the general ones are usable by other implementations
that use the same eventlog system, or indeed not so sensitive to changes
in GHC itself.)

The general ones are:

 * total heap mem allocated since prog start, on a per-HEC basis
 * current size of the heap (MBlocks reserved from OS for the heap)
 * current size of live data in the heap

Currently these are all emitted by GHC at GC time (live data only at
major GC).

The GHC specific ones are:

 * an event giving various static heap parameters:
   * number of generations (usually 2)
   * max size if any
   * nursery size
   * MBlock and block sizes
 * an event emitted on each GC containing:
   * GC generation (usually just 0,1)
   * total bytes copied
   * bytes lost to heap slop and fragmentation
   * the number of threads in the parallel GC (1 for serial)
   * the maximum number of bytes copied by any par GC thread
   * the total number of bytes copied by all par GC threads
     (these last three can be used to calculate an estimate of the
      work balance in parallel GCs)
parent cd930da1
......@@ -147,8 +147,18 @@
#define EVENT_CAP_DELETE 46 /* (cap) */
#define EVENT_CAP_DISABLE 47 /* (cap) */
#define EVENT_CAP_ENABLE 48 /* (cap) */
/* Range 49 - 59 is available for new GHC and common events */
#define EVENT_HEAP_ALLOCATED 49 /* (heap_capset, alloc_bytes) */
#define EVENT_HEAP_SIZE 50 /* (heap_capset, size_bytes) */
#define EVENT_HEAP_LIVE 51 /* (heap_capset, live_bytes) */
#define EVENT_HEAP_INFO_GHC 52 /* (heap_capset, n_generations,
max_heap_size, alloc_area_size,
mblock_size, block_size) */
#define EVENT_GC_STATS_GHC 53 /* (heap_capset, generation,
copied_bytes, slop_bytes, frag_bytes,
par_n_threads,
par_max_copied, par_tot_copied) */
/* Range 54 - 59 is available for new GHC and common events */
/* Range 60 - 80 is used by eden for parallel tracing
* see http://www.mathematik.uni-marburg.de/~eden/
......@@ -161,7 +171,7 @@
* ranges higher than this are reserved but not currently emitted by ghc.
* This must match the size of the EventDesc[] array in EventLog.c
*/
#define NUM_GHC_EVENT_TAGS 49
#define NUM_GHC_EVENT_TAGS 54
#if 0 /* DEPRECATED EVENTS: */
/* we don't actually need to record the thread, it's implicit */
......
......@@ -305,7 +305,7 @@ usage_text[] = {
# endif
" where [flags] can contain:",
" s scheduler events",
" g GC events",
" g GC and heap events",
" p par spark events (sampled)",
" f par spark events (full detail)",
" u user events (emitted from Haskell code)",
......
......@@ -55,6 +55,11 @@ provider HaskellEvent {
probe gc__idle (EventCapNo);
probe gc__work (EventCapNo);
probe gc__done (EventCapNo);
probe gc__stats (CapsetID, StgWord, StgWord, StgWord, StgWord, StgWord, StgWord, StgWord);
probe heap__info (CapsetID, StgWord, StgWord, StgWord, StgWord, StgWord);
probe heap__allocated (EventCapNo, CapsetID, StgWord64);
probe heap__size (CapsetID, StgWord);
probe heap__live (CapsetID, StgWord);
/* capability events */
probe startup (EventCapNo);
......
......@@ -330,9 +330,9 @@ stat_gcWorkerThreadDone (gc_thread *gct STG_UNUSED)
-------------------------------------------------------------------------- */
void
stat_endGC (gc_thread *gct,
lnat alloc, lnat live, lnat copied, nat gen,
lnat par_max_copied, lnat par_tot_copied, lnat slop)
stat_endGC (Capability *cap, gc_thread *gct,
lnat alloc, lnat live, lnat copied, lnat slop, nat gen,
nat par_n_threads, lnat par_max_copied, lnat par_tot_copied)
{
if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||
RtsFlags.ProfFlags.doHeapProfile)
......@@ -388,9 +388,26 @@ stat_endGC (gc_thread *gct,
lnat n;
for (n = 0; n < n_capabilities; n++) {
tot_alloc += capabilities[n].total_allocated;
traceEventHeapAllocated(&capabilities[n],
CAPSET_HEAP_DEFAULT,
capabilities[n].total_allocated * sizeof(W_));
}
ASSERT(GC_tot_alloc == tot_alloc);
}
traceEventHeapSize(cap,
CAPSET_HEAP_DEFAULT,
mblocks_allocated * MBLOCK_SIZE_W * sizeof(W_));
traceEventGcStats(cap,
CAPSET_HEAP_DEFAULT,
gen,
copied * sizeof(W_),
slop * sizeof(W_),
/* current loss due to fragmentation */
(mblocks_allocated * BLOCKS_PER_MBLOCK - n_alloc_blocks)
* BLOCK_SIZE_W * sizeof(W_),
par_n_threads,
par_max_copied * sizeof(W_),
par_tot_copied * sizeof(W_));
if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */
if (live > max_residency) {
......@@ -399,6 +416,9 @@ stat_endGC (gc_thread *gct,
current_residency = live;
residency_samples++;
cumulative_residency += live;
traceEventHeapLive(cap,
CAPSET_HEAP_DEFAULT,
live * sizeof(W_));
}
if (slop > max_slop) max_slop = slop;
......
......@@ -19,9 +19,9 @@ void stat_startInit(void);
void stat_endInit(void);
void stat_startGC(struct gc_thread_ *gct);
void stat_endGC (struct gc_thread_ *gct, lnat alloc, lnat live,
lnat copied, nat gen,
lnat max_copied, lnat avg_copied, lnat slop);
void stat_endGC (Capability *cap, struct gc_thread_ *gct,
lnat alloc, lnat live, lnat copied, lnat slop, nat gen,
nat n_gc_threads, lnat par_max_copied, lnat par_tot_copied);
void stat_gcWorkerThreadStart (struct gc_thread_ *gct);
void stat_gcWorkerThreadDone (struct gc_thread_ *gct);
......
......@@ -98,6 +98,9 @@ void initTracing (void)
TRACE_gc =
RtsFlags.TraceFlags.gc ||
RtsFlags.DebugFlags.gc;
if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
}
TRACE_spark_sampled =
RtsFlags.TraceFlags.sparks_sampled;
......@@ -301,6 +304,44 @@ void traceGcEvent_ (Capability *cap, EventTypeNum tag)
}
}
/* Post one of the generic single-datum heap events (EVENT_HEAP_ALLOCATED,
 * EVENT_HEAP_SIZE or EVENT_HEAP_LIVE) for `heap_capset'; `info1' carries
 * the single payload word (a byte count at all current call sites). */
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      lnat info1)
{
    /* no stderr equivalent for these ones */
    postHeapEvent(cap, tag, heap_capset, info1);
}
/* Post the EVENT_HEAP_INFO_GHC event describing the static heap layout:
 * number of generations, max heap size, allocation-area (nursery) size,
 * and the mblock/block sizes.  It is routed through the global event
 * buffer (see postEventHeapInfo), which is why there is no Capability
 * argument. */
void traceEventHeapInfo_ (CapsetID heap_capset,
                          nat gens,
                          lnat maxHeapSize,
                          lnat allocAreaSize,
                          lnat mblockSize,
                          lnat blockSize)
{
    /* no stderr equivalent for this one */
    postEventHeapInfo(heap_capset, gens,
                      maxHeapSize, allocAreaSize,
                      mblockSize, blockSize);
}
/* Post the EVENT_GC_STATS_GHC event for one completed GC: generation
 * collected, bytes copied, slop and fragmentation, and the parallel-GC
 * thread count plus max/total bytes copied per GC thread.  Callers pass
 * all size quantities already converted to bytes. */
void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         nat gen,
                         lnat copied,
                         lnat slop,
                         lnat fragmentation,
                         nat par_n_threads,
                         lnat par_max_copied,
                         lnat par_tot_copied)
{
    /* no stderr equivalent for this one */
    postEventGcStats(cap, heap_capset, gen,
                     copied, slop, fragmentation,
                     par_n_threads, par_max_copied, par_tot_copied);
}
void traceCapEvent (Capability *cap,
EventTypeNum tag)
{
......
......@@ -37,8 +37,9 @@ typedef StgWord16 CapsetType;
enum CapsetType { CapsetTypeCustom = CAPSET_TYPE_CUSTOM,
CapsetTypeOsProcess = CAPSET_TYPE_OSPROCESS,
CapsetTypeClockdomain = CAPSET_TYPE_CLOCKDOMAIN };
#define CAPSET_OSPROCESS_DEFAULT 0
#define CAPSET_CLOCKDOMAIN_DEFAULT 1
#define CAPSET_OSPROCESS_DEFAULT ((CapsetID)0)
#define CAPSET_HEAP_DEFAULT ((CapsetID)0) /* reusing the same capset */
#define CAPSET_CLOCKDOMAIN_DEFAULT ((CapsetID)1)
// -----------------------------------------------------------------------------
// Message classes
......@@ -112,6 +113,35 @@ void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
void traceGcEvent_ (Capability *cap, EventTypeNum tag);
/*
* Record a heap event
*/
/* Post a single-datum heap event iff GC tracing (TRACE_gc) is enabled.
 * NOTE(review): the expansion is a bare `if' statement (matching the
 * other trace macros in this file) rather than do { } while (0), so it
 * carries the usual dangling-else hazard -- confirm no caller uses it
 * as the un-braced body of an if/else. */
#define traceHeapEvent(cap, tag, heap_capset, info1) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceHeapEvent_(cap, tag, heap_capset, info1); \
    }
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      lnat info1);

/* Post the static heap parameters (generation count, size limits,
 * mblock/block sizes, all in bytes); goes via the global event buffer,
 * hence no Capability argument. */
void traceEventHeapInfo_ (CapsetID heap_capset,
                          nat gens,
                          lnat maxHeapSize,
                          lnat allocAreaSize,
                          lnat mblockSize,
                          lnat blockSize);

/* Post per-GC statistics (EVENT_GC_STATS_GHC); size quantities are in
 * bytes, already scaled by the caller. */
void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         nat gen,
                         lnat copied,
                         lnat slop,
                         lnat fragmentation,
                         nat par_n_threads,
                         lnat par_max_copied,
                         lnat par_tot_copied);
/*
* Record a spark event
*/
......@@ -227,6 +257,13 @@ void traceSparkCounters_ (Capability *cap,
#define traceSchedEvent(cap, tag, tso, other) /* nothing */
#define traceSchedEvent2(cap, tag, tso, other, info) /* nothing */
#define traceGcEvent(cap, tag) /* nothing */
#define traceEventGcStats_(cap, heap_capset, gen, \
copied, slop, fragmentation, \
par_n_threads, par_max_copied, par_tot_copied) /* nothing */
#define traceHeapEvent(cap, tag, heap_capset, info1) /* nothing */
#define traceEventHeapInfo_(heap_capset, gens, \
maxHeapSize, allocAreaSize, \
mblockSize, blockSize) /* nothing */
#define traceSparkEvent(cap, tag) /* nothing */
#define traceSparkEvent2(cap, tag, other) /* nothing */
#define traceCap(class, cap, msg, ...) /* nothing */
......@@ -302,6 +339,31 @@ INLINE_HEADER void dtraceStartup (int num_caps) {
HASKELLEVENT_GC_WORK(cap)
#define dtraceGcDone(cap) \
HASKELLEVENT_GC_DONE(cap)
/* DTrace glue for the new heap/GC statistic events.
 *
 * Two fixes relative to the original:
 *  1. dtraceEventGcStats's replacement list ended with a stray trailing
 *     backslash, which spliced the following `#define dtraceHeapInfo'
 *     line into the macro body -- ill-formed preprocessing in any
 *     DTrace-enabled build.  The backslash is removed.
 *  2. dtraceEventHeapSize expanded to HASKELLEVENT_HEAP_LIVE (copy-paste
 *     error), so heap-size samples fired the heap__live probe.  It now
 *     fires HASKELLEVENT_HEAP_SIZE, matching the heap__size probe
 *     declared in the provider file. */
#define dtraceEventGcStats(heap_capset, gens,            \
                           copies, slop, fragmentation,  \
                           par_n_threads,                \
                           par_max_copied,               \
                           par_tot_copied)               \
    HASKELLEVENT_GC_STATS(heap_capset, gens,             \
                          copies, slop, fragmentation,   \
                          par_n_threads,                 \
                          par_max_copied,                \
                          par_tot_copied)
#define dtraceHeapInfo(heap_capset, gens,                \
                       maxHeapSize, allocAreaSize,       \
                       mblockSize, blockSize)            \
    HASKELLEVENT_HEAP_INFO(heap_capset, gens,            \
                           maxHeapSize, allocAreaSize,   \
                           mblockSize, blockSize)
#define dtraceEventHeapAllocated(cap, heap_capset,       \
                                 allocated)              \
    HASKELLEVENT_HEAP_ALLOCATED(cap, heap_capset,        \
                                allocated)
#define dtraceEventHeapSize(heap_capset, size)           \
    HASKELLEVENT_HEAP_SIZE(heap_capset, size)
#define dtraceEventHeapLive(heap_capset, live)           \
    HASKELLEVENT_HEAP_LIVE(heap_capset, live)
#define dtraceCapsetCreate(capset, capset_type) \
HASKELLEVENT_CAPSET_CREATE(capset, capset_type)
#define dtraceCapsetDelete(capset) \
......@@ -346,6 +408,18 @@ INLINE_HEADER void dtraceStartup (int num_caps STG_UNUSED) {};
#define dtraceGcIdle(cap) /* nothing */
#define dtraceGcWork(cap) /* nothing */
#define dtraceGcDone(cap) /* nothing */
/* DTrace disabled: the heap/GC statistic probe macros compile to nothing,
 * so call sites need no conditional compilation of their own. */
#define dtraceEventGcStats(heap_capset, gens, \
                           copies, slop, fragmentation, \
                           par_n_threads, \
                           par_max_copied, \
                           par_tot_copied) /* nothing */
#define dtraceHeapInfo(heap_capset, gens, \
                       maxHeapSize, allocAreaSize, \
                       mblockSize, blockSize) /* nothing */
#define dtraceEventHeapAllocated(cap, heap_capset, \
                                 allocated) /* nothing */
#define dtraceEventHeapSize(heap_capset, size) /* nothing */
#define dtraceEventHeapLive(heap_capset, live) /* nothing */
#define dtraceCapCreate(cap) /* nothing */
#define dtraceCapDelete(cap) /* nothing */
#define dtraceCapEnable(cap) /* nothing */
......@@ -511,6 +585,70 @@ INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
dtraceGcDone((EventCapNo)cap->no);
}
/* Report per-GC statistics: to the eventlog when GC tracing is enabled,
 * and to the DTrace gc__stats probe (unconditionally, when probes are
 * compiled in).  All copied/slop/fragmentation quantities are in bytes.
 *
 * Fix: the dtrace call previously passed `gens' and `copies', which are
 * not parameters of this function (the parameters are `gen' and
 * `copied'), so a DTrace-enabled build failed to compile.  The call now
 * forwards the actual parameters. */
INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
                                     CapsetID heap_capset STG_UNUSED,
                                     nat gen STG_UNUSED,
                                     lnat copied STG_UNUSED,
                                     lnat slop STG_UNUSED,
                                     lnat fragmentation STG_UNUSED,
                                     nat par_n_threads STG_UNUSED,
                                     lnat par_max_copied STG_UNUSED,
                                     lnat par_tot_copied STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventGcStats_(cap, heap_capset, gen,
                           copied, slop, fragmentation,
                           par_n_threads, par_max_copied, par_tot_copied);
    }
    dtraceEventGcStats(heap_capset, gen,
                       copied, slop, fragmentation,
                       par_n_threads, par_max_copied, par_tot_copied);
}
/* Report the static heap parameters (emitted once, from initStorage):
 * eventlog side is gated on TRACE_gc; the DTrace heap__info probe fires
 * regardless, since the dtrace call is outside the conditional. */
INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
                                      nat gens STG_UNUSED,
                                      lnat maxHeapSize STG_UNUSED,
                                      lnat allocAreaSize STG_UNUSED,
                                      lnat mblockSize STG_UNUSED,
                                      lnat blockSize STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventHeapInfo_(heap_capset, gens,
                            maxHeapSize, allocAreaSize,
                            mblockSize, blockSize);
    }
    dtraceHeapInfo(heap_capset, gens,
                   maxHeapSize, allocAreaSize,
                   mblockSize, blockSize);
}
/* Report this capability's cumulative allocation total, in bytes.
 * traceHeapEvent already gates the eventlog side on TRACE_gc; the
 * DTrace heap__allocated probe fires unconditionally when compiled in. */
INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
                                           CapsetID heap_capset STG_UNUSED,
                                           lnat allocated STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
    dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
}
/* Report the current heap size (memory reserved from the OS for the
 * heap), in bytes.  Eventlog side gated on TRACE_gc via traceHeapEvent. */
INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      lnat heap_size STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);
    dtraceEventHeapSize(heap_capset, heap_size);
}
/* Report the amount of live data in the heap, in bytes (the caller in
 * stat_endGC only emits this at major GC, when the figure is accurate). */
INLINE_HEADER void traceEventHeapLive(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      lnat heap_live STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);
    dtraceEventHeapLive(heap_capset, heap_live);
}
/* TODO: at some point we should remove this event, it's covered by
* the cap create/delete events.
*/
INLINE_HEADER void traceEventStartup(void)
{
int n_caps;
......
......@@ -71,6 +71,11 @@ char *EventDesc[] = {
[EVENT_GC_END] = "Finished GC",
[EVENT_REQUEST_SEQ_GC] = "Request sequential GC",
[EVENT_REQUEST_PAR_GC] = "Request parallel GC",
[EVENT_GC_STATS_GHC] = "GC statistics",
[EVENT_HEAP_INFO_GHC] = "Heap static parameters",
[EVENT_HEAP_ALLOCATED] = "Total heap mem ever allocated",
[EVENT_HEAP_SIZE] = "Current heap size",
[EVENT_HEAP_LIVE] = "Current heap live data",
[EVENT_CREATE_SPARK_THREAD] = "Create spark thread",
[EVENT_LOG_MSG] = "Log message",
[EVENT_USER_MSG] = "User message",
......@@ -358,6 +363,31 @@ initEventLogging(void)
eventTypes[t].size = 7 * sizeof(StgWord64);
break;
case EVENT_HEAP_ALLOCATED: // (heap_capset, alloc_bytes)
case EVENT_HEAP_SIZE: // (heap_capset, size_bytes)
case EVENT_HEAP_LIVE: // (heap_capset, live_bytes)
eventTypes[t].size = sizeof(EventCapsetID) + sizeof(StgWord64);
break;
case EVENT_HEAP_INFO_GHC: // (heap_capset, n_generations,
// max_heap_size, alloc_area_size,
// mblock_size, block_size)
eventTypes[t].size = sizeof(EventCapsetID)
+ sizeof(StgWord16)
+ sizeof(StgWord64) * 4;
break;
case EVENT_GC_STATS_GHC: // (heap_capset, generation,
// copied_bytes, slop_bytes, frag_bytes,
// par_n_threads,
// par_max_copied, par_tot_copied)
eventTypes[t].size = sizeof(EventCapsetID)
+ sizeof(StgWord16)
+ sizeof(StgWord64) * 3
+ sizeof(StgWord32)
+ sizeof(StgWord64) * 2;
break;
case EVENT_BLOCK_MARKER:
eventTypes[t].size = sizeof(StgWord32) + sizeof(EventTimestamp) +
sizeof(EventCapNo);
......@@ -783,6 +813,101 @@ void postWallClockTime (EventCapsetID capset)
RELEASE_LOCK(&eventBufMutex);
}
/*
* Various GC and heap events
*/
/* Write one of the single-datum heap events (heap allocated / size /
 * live) into the posting capability's private event buffer.  No lock is
 * taken: capEventBuf[cap->no] is only written by its own capability.
 * Aborts on any other tag. */
void postHeapEvent (Capability *cap,
                    EventTypeNum tag,
                    EventCapsetID heap_capset,
                    lnat info1)
{
    EventsBuf *buf = &capEventBuf[cap->no];

    /* Flush before writing anything so the event is never split. */
    if (!hasRoomForEvent(buf, tag)) {
        printAndClearEventBuf(buf);
    }

    postEventHeader(buf, tag);

    if (tag == EVENT_HEAP_ALLOCATED ||   /* (heap_capset, alloc_bytes) */
        tag == EVENT_HEAP_SIZE ||        /* (heap_capset, size_bytes)  */
        tag == EVENT_HEAP_LIVE) {        /* (heap_capset, live_bytes)  */
        postCapsetID(buf, heap_capset);
        postWord64(buf, info1 /* alloc/size/live_bytes */);
    } else {
        barf("postHeapEvent: unknown event tag %d", tag);
    }
}
/* Write the EVENT_HEAP_INFO_GHC record: the RTS's static heap
 * parameters (generation count plus four sizes in bytes).  Uses the
 * shared global event buffer, so it holds eventBufMutex for the whole
 * write; the payload order must match the size declared for this event
 * in initEventLogging (capset, Word16, 4 x Word64). */
void postEventHeapInfo (EventCapsetID heap_capset,
                        nat gens,
                        lnat maxHeapSize,
                        lnat allocAreaSize,
                        lnat mblockSize,
                        lnat blockSize)
{
    ACQUIRE_LOCK(&eventBufMutex);

    if (!hasRoomForEvent(&eventBuf, EVENT_HEAP_INFO_GHC)) {
        // Flush event buffer to make room for new event.
        printAndClearEventBuf(&eventBuf);
    }

    postEventHeader(&eventBuf, EVENT_HEAP_INFO_GHC);
    /* EVENT_HEAP_INFO_GHC (heap_capset, n_generations,
                            max_heap_size, alloc_area_size,
                            mblock_size, block_size) */
    postCapsetID(&eventBuf, heap_capset);
    /* generation count is encoded as a 16-bit field */
    postWord16(&eventBuf, gens);
    postWord64(&eventBuf, maxHeapSize);
    postWord64(&eventBuf, allocAreaSize);
    postWord64(&eventBuf, mblockSize);
    postWord64(&eventBuf, blockSize);

    RELEASE_LOCK(&eventBufMutex);
}
/* Write the EVENT_GC_STATS_GHC record for one completed GC into the
 * posting capability's private event buffer (capEventBuf[cap->no]);
 * unlike postEventHeapInfo no global lock is taken.  The field order
 * and widths must match the size declared for this event in
 * initEventLogging (capset, Word16, 3 x Word64, Word32, 2 x Word64). */
void postEventGcStats (Capability *cap,
                       EventCapsetID heap_capset,
                       nat gen,
                       lnat copied,
                       lnat slop,
                       lnat fragmentation,
                       nat par_n_threads,
                       lnat par_max_copied,
                       lnat par_tot_copied)
{
    EventsBuf *eb;

    eb = &capEventBuf[cap->no];

    if (!hasRoomForEvent(eb, EVENT_GC_STATS_GHC)) {
        // Flush event buffer to make room for new event.
        printAndClearEventBuf(eb);
    }

    postEventHeader(eb, EVENT_GC_STATS_GHC);
    /* EVENT_GC_STATS_GHC (heap_capset, generation,
                           copied_bytes, slop_bytes, frag_bytes,
                           par_n_threads, par_max_copied, par_tot_copied) */
    postCapsetID(eb, heap_capset);
    postWord16(eb, gen);             /* generation fits a 16-bit field */
    postWord64(eb, copied);
    postWord64(eb, slop);
    postWord64(eb, fragmentation);
    postWord32(eb, par_n_threads);   /* thread count is a 32-bit field */
    postWord64(eb, par_max_copied);
    postWord64(eb, par_tot_copied);
}
void
postEvent (Capability *cap, EventTypeNum tag)
{
......
......@@ -97,6 +97,31 @@ void postThreadLabel(Capability *cap,
EventThreadID id,
char *label);
/*
* Various GC and heap events
*/
void postHeapEvent (Capability *cap,
EventTypeNum tag,
EventCapsetID heap_capset,
StgWord info1);
void postEventHeapInfo (EventCapsetID heap_capset,
nat gens,
lnat maxHeapSize,
lnat allocAreaSize,
lnat mblockSize,
lnat blockSize);
void postEventGcStats (Capability *cap,
EventCapsetID heap_capset,
nat gen,
lnat copied,
lnat slop,
lnat fragmentation,
nat par_n_threads,
lnat par_max_copied,
lnat par_tot_copied);
#else /* !TRACING */
INLINE_HEADER void postSchedEvent (Capability *cap STG_UNUSED,
......
......@@ -738,9 +738,9 @@ GarbageCollect (rtsBool force_major_gc,
#endif
// ok, GC over: tell the stats department what happened.
stat_endGC(gct, allocated, live_words,
copied, N, par_max_copied, par_tot_copied,
live_blocks * BLOCK_SIZE_W - live_words /* slop */);
stat_endGC(cap, gct, allocated, live_words, copied,
live_blocks * BLOCK_SIZE_W - live_words /* slop */,
N, n_gc_threads, par_max_copied, par_tot_copied);
// Guess which generation we'll collect *next* time
initialise_N(force_major_gc);
......
......@@ -186,6 +186,13 @@ initStorage (void)
IF_DEBUG(gc, statDescribeGens());
RELEASE_SM_LOCK;
traceEventHeapInfo(CAPSET_HEAP_DEFAULT,
RtsFlags.GcFlags.generations,
RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE_W * sizeof(W_),
RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W * sizeof(W_),
MBLOCK_SIZE_W * sizeof(W_),
BLOCK_SIZE_W * sizeof(W_));
}
void storageAddCapabilities (nat from, nat to)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment