Commit 41737f12 authored by Simon Marlow

Deprecate lnat, and use StgWord instead

lnat was originally "long unsigned int" but we were using it when we
wanted a 64-bit type on a 64-bit machine.  This broke on Windows x64,
where long == int == 32 bits.  Using types of unspecified size is bad,
but what we really wanted was a type with N bits on an N-bit machine.
StgWord is exactly that.
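
Concretely, the difference only matters on LLP64 targets such as Win64. A small standalone illustration (not part of this commit, and assuming the GHC RTS headers are on the include path) of why a word-sized quantity has to be spelled StgWord rather than unsigned long:

    /* On 64-bit Windows (LLP64), long is 32 bits while pointers are 64 bits;
     * on 64-bit Linux/OS X (LP64) both are 64 bits.  StgWord always matches
     * the word/pointer size of the target. */
    #include <stdio.h>
    #include "Rts.h"

    int main(void)
    {
        printf("unsigned long: %u bits, StgWord: %u bits, void *: %u bits\n",
               (unsigned)(8 * sizeof(unsigned long)),
               (unsigned)(8 * sizeof(StgWord)),
               (unsigned)(8 * sizeof(void *)));
        return 0;   /* 32/64/64 on Win64; 64/64/64 on LP64 systems */
    }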

lnat was mentioned in some APIs that clients might be using
(e.g. StackOverflowHook()), so we leave it defined but with a comment
to say that it's deprecated.
Parent: a8179622
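
The deprecated typedef mentioned above (visible in one of the hunks below) means existing client code that overrides the hooks with the old lnat spelling keeps compiling, but new code should say StgWord (or its W_ alias, as the updated headers below do). A minimal sketch of such an override written against the new signature; the application name and message text are made up for illustration, not part of this commit:

    #include <stdio.h>
    #include "Rts.h"   /* declares the hooks and defines StgWord / W_ */

    /* StgWord is word-sized on every target: 32 bits on a 32-bit machine and
     * 64 bits on a 64-bit machine (including Win64, where unsigned long stays
     * at 32 bits), so it can always hold a byte count such as stack_size. */
    void
    StackOverflowHook (StgWord stack_size)   /* in bytes */
    {
        fprintf(stderr, "my-app: GHC stack limit of %zu bytes exceeded\n",
                (size_t)stack_size);
    }
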
@@ -40,7 +40,7 @@ defaultsHook (void)
 }
 void
-StackOverflowHook (lnat stack_size) /* in bytes */
+StackOverflowHook (StgWord stack_size) /* in bytes */
 {
 fprintf(stderr, "GHC stack-space overflow: current limit is %zu bytes.\nUse the `-K<size>' option to increase it.\n", (size_t)stack_size);
 }
@@ -225,7 +225,7 @@ main(int argc, char *argv[])
 printf("#define BLOCK_SIZE %u\n", BLOCK_SIZE);
 printf("#define MBLOCK_SIZE %u\n", MBLOCK_SIZE);
-printf("#define BLOCKS_PER_MBLOCK %" FMT_SizeT "\n", (lnat)BLOCKS_PER_MBLOCK);
+printf("#define BLOCKS_PER_MBLOCK %" FMT_SizeT "\n", (W_)BLOCKS_PER_MBLOCK);
 // could be derived, but better to save doing the calculation twice
 printf("\n\n");
@@ -18,9 +18,9 @@ extern char *ghc_rts_opts;
 extern void OnExitHook (void);
 extern int NoRunnableThreadsHook (void);
-extern void StackOverflowHook (lnat stack_size);
-extern void OutOfHeapHook (lnat request_size, lnat heap_size);
-extern void MallocFailHook (lnat request_size /* in bytes */, char *msg);
+extern void StackOverflowHook (W_ stack_size);
+extern void OutOfHeapHook (W_ request_size, W_ heap_size);
+extern void MallocFailHook (W_ request_size /* in bytes */, char *msg);
 extern void defaultsHook (void);
 #endif /* RTS_HOOKS_H */
@@ -34,7 +34,7 @@ typedef struct SpinLock_
 typedef StgWord SpinLock;
 #endif
-typedef lnat SpinLockCount;
+typedef StgWord SpinLockCount;
 #if defined(PROF_SPIN)
@@ -16,8 +16,10 @@
 #include <stddef.h>
-typedef unsigned int nat; /* at least 32 bits (like int) */
-typedef size_t lnat; /* at least 32 bits */
+typedef unsigned int nat; /* at least 32 bits (like int) */
+// Deprecated; just use StgWord instead
+typedef StgWord lnat;
 /* ullong (64|128-bit) type: only include if needed (not ANSI) */
 #if defined(__GNUC__)
@@ -429,20 +429,20 @@ EXTERN_INLINE StgWord stack_frame_sizeW( StgClosure *frame )
 -------------------------------------------------------------------------- */
 // The number of card bytes needed
-INLINE_HEADER lnat mutArrPtrsCards (lnat elems)
+INLINE_HEADER W_ mutArrPtrsCards (W_ elems)
 {
-return (lnat)((elems + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
+return (W_)((elems + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
 >> MUT_ARR_PTRS_CARD_BITS);
 }
 // The number of words in the card table
-INLINE_HEADER lnat mutArrPtrsCardTableSize (lnat elems)
+INLINE_HEADER W_ mutArrPtrsCardTableSize (W_ elems)
 {
 return ROUNDUP_BYTES_TO_WDS(mutArrPtrsCards(elems));
 }
 // The address of the card for a particular card number
-INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, lnat n)
+INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, W_ n)
 {
 return ((StgWord8 *)&(a->payload[a->ptrs]) + n);
 }
@@ -149,8 +149,8 @@ extern generation * oldest_gen;
 -------------------------------------------------------------------------- */
-StgPtr allocate ( Capability *cap, lnat n );
-StgPtr allocatePinned ( Capability *cap, lnat n );
+StgPtr allocate ( Capability *cap, W_ n );
+StgPtr allocatePinned ( Capability *cap, W_ n );
 /* memory allocator for executable memory */
 void * allocateExec(unsigned int len, void **exec_addr);
@@ -12,8 +12,8 @@
 #ifndef RTS_STORAGE_MBLOCK_H
 #define RTS_STORAGE_MBLOCK_H
-extern lnat peak_mblocks_allocated;
-extern lnat mblocks_allocated;
+extern W_ peak_mblocks_allocated;
+extern W_ mblocks_allocated;
 extern void initMBlocks(void);
 extern void * getMBlock(void);
@@ -156,7 +156,7 @@ typedef struct {
 MBlockMapLine lines[MBLOCK_MAP_ENTRIES];
 } MBlockMap;
-extern lnat mpc_misses;
+extern W_ mpc_misses;
 StgBool HEAP_ALLOCED_miss(StgWord mblock, void *p);
@@ -80,7 +80,7 @@ arenaAlloc( Arena *arena, size_t size )
 return p;
 } else {
 // allocate a fresh block...
-req_blocks = (lnat)BLOCK_ROUND_UP(size) / BLOCK_SIZE;
+req_blocks = (W_)BLOCK_ROUND_UP(size) / BLOCK_SIZE;
 bd = allocGroup_lock(req_blocks);
 arena_blocks += req_blocks;
@@ -123,7 +123,7 @@ struct Capability_ {
 SparkCounters spark_stats;
 #endif
 // Total words allocated by this cap since rts start
-lnat total_allocated;
+W_ total_allocated;
 // Per-capability STM-related data
 StgTVarWatchQueue *free_tvar_watch_queues;
@@ -80,7 +80,7 @@ disInstr ( StgBCO *bco, int pc )
 pc += 1; break;
 case bci_STKCHECK: {
 StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
-debugBelch("STKCHECK %" FMT_SizeT "\n", (lnat)stk_words_reqd );
+debugBelch("STKCHECK %" FMT_SizeT "\n", (W_)stk_words_reqd );
 break;
 }
 case bci_PUSH_L:
@@ -296,7 +296,7 @@ numLabel( GtkWidget *lbl, nat n )
 }
 void
-updateFrontPanelAfterGC( nat N, lnat live )
+updateFrontPanelAfterGC( nat N, W_ live )
 {
 char buf[1000];
@@ -19,7 +19,7 @@
 void initFrontPanel( void );
 void stopFrontPanel( void );
 void updateFrontPanelBeforeGC( nat N );
-void updateFrontPanelAfterGC( nat N, lnat live );
+void updateFrontPanelAfterGC( nat N, W_ live );
 void updateFrontPanel( void );
@@ -1900,7 +1900,7 @@ mmap_again:
 MAP_PRIVATE|TRY_MAP_32BIT|fixed|flags, fd, 0);
 if (result == MAP_FAILED) {
-sysErrorBelch("mmap %" FMT_SizeT " bytes at %p",(lnat)size,map_addr);
+sysErrorBelch("mmap %" FMT_SizeT " bytes at %p",(W_)size,map_addr);
 errorBelch("Try specifying an address with +RTS -xm<addr> -RTS");
 stg_exit(EXIT_FAILURE);
 }
@@ -1943,7 +1943,7 @@ mmap_again:
 }
 #endif
-IF_DEBUG(linker, debugBelch("mmapForLinker: mapped %" FMT_SizeT " bytes starting at %p\n", (lnat)size, result));
+IF_DEBUG(linker, debugBelch("mmapForLinker: mapped %" FMT_SizeT " bytes starting at %p\n", (W_)size, result));
 IF_DEBUG(linker, debugBelch("mmapForLinker: done\n"));
 return result;
 }
@@ -4937,7 +4937,7 @@ do_Elf_Rel_relocations ( ObjectCode* oc, char* ehdrC,
 default:
 errorBelch("%s: unhandled ELF relocation(Rel) type %" FMT_SizeT "\n",
-oc->fileName, (lnat)ELF_R_TYPE(info));
+oc->fileName, (W_)ELF_R_TYPE(info));
 return 0;
 }
@@ -5252,7 +5252,7 @@ do_Elf_Rela_relocations ( ObjectCode* oc, char* ehdrC,
 default:
 errorBelch("%s: unhandled ELF relocation(RelA) type %" FMT_SizeT "\n",
-oc->fileName, (lnat)ELF_R_TYPE(info));
+oc->fileName, (W_)ELF_R_TYPE(info));
 return 0;
 }
@@ -74,7 +74,7 @@ loop:
 {
 StgTSO *tso = ((MessageWakeup *)m)->tso;
 debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld",
-(lnat)tso->id);
+(W_)tso->id);
 tryWakeupThread(cap, tso);
 }
 else if (i == &stg_MSG_THROWTO_info)
@@ -90,7 +90,7 @@ loop:
 }
 debugTraceCap(DEBUG_sched, cap, "message: throwTo %ld -> %ld",
-(lnat)t->source->id, (lnat)t->target->id);
+(W_)t->source->id, (W_)t->target->id);
 ASSERT(t->source->why_blocked == BlockedOnMsgThrowTo);
 ASSERT(t->source->block_info.closure == (StgClosure *)m);
@@ -167,7 +167,7 @@ nat messageBlackHole(Capability *cap, MessageBlackHole *msg)
 StgTSO *owner;
 debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p",
-(lnat)msg->tso->id, msg->bh);
+(W_)msg->tso->id, msg->bh);
 info = bh->header.info;
@@ -256,7 +256,7 @@ loop:
 recordClosureMutated(cap,bh); // bh was mutated
 debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",
-(lnat)msg->tso->id, (lnat)owner->id);
+(W_)msg->tso->id, (W_)owner->id);
 return 1; // blocked
 }
@@ -289,7 +289,7 @@ loop:
 }
 debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",
-(lnat)msg->tso->id, (lnat)owner->id);
+(W_)msg->tso->id, (W_)owner->id);
 // See above, #3838
 if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
@@ -300,21 +300,21 @@ printClosure( StgClosure *obj )
 StgWord i;
 debugBelch("ARR_WORDS(\"");
 for (i=0; i<arr_words_words((StgArrWords *)obj); i++)
-debugBelch("%" FMT_SizeT, (lnat)((StgArrWords *)obj)->payload[i]);
+debugBelch("%" FMT_SizeT, (W_)((StgArrWords *)obj)->payload[i]);
 debugBelch("\")\n");
 break;
 }
 case MUT_ARR_PTRS_CLEAN:
-debugBelch("MUT_ARR_PTRS_CLEAN(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
+debugBelch("MUT_ARR_PTRS_CLEAN(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
 break;
 case MUT_ARR_PTRS_DIRTY:
-debugBelch("MUT_ARR_PTRS_DIRTY(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
+debugBelch("MUT_ARR_PTRS_DIRTY(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
 break;
 case MUT_ARR_PTRS_FROZEN:
-debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
+debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
 break;
 case MVAR_CLEAN:
@@ -431,7 +431,7 @@ printSmallBitmap( StgPtr spBottom, StgPtr payload, StgWord bitmap, nat size )
 printPtr((P_)payload[i]);
 debugBelch("\n");
 } else {
-debugBelch("Word# %" FMT_SizeT "\n", (lnat)payload[i]);
+debugBelch("Word# %" FMT_SizeT "\n", (W_)payload[i]);
 }
 }
 }
@@ -447,12 +447,12 @@ printLargeBitmap( StgPtr spBottom, StgPtr payload, StgLargeBitmap* large_bitmap,
 StgWord bitmap = large_bitmap->bitmap[bmp];
 j = 0;
 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
-debugBelch(" stk[%" FMT_SizeT "] (%p) = ", (lnat)(spBottom-(payload+i)), payload+i);
+debugBelch(" stk[%" FMT_SizeT "] (%p) = ", (W_)(spBottom-(payload+i)), payload+i);
 if ((bitmap & 1) == 0) {
 printPtr((P_)payload[i]);
 debugBelch("\n");
 } else {
-debugBelch("Word# %" FMT_SizeT "\n", (lnat)payload[i]);
+debugBelch("Word# %" FMT_SizeT "\n", (W_)payload[i]);
 }
 }
 }
@@ -821,7 +821,7 @@ dumpCensus( Census *census )
 }
 #endif
-fprintf(hp_file, "\t%" FMT_SizeT "\n", (lnat)count * sizeof(W_));
+fprintf(hp_file, "\t%" FMT_SizeT "\n", (W_)count * sizeof(W_));
 }
 printSample(rtsFalse, census->time);
@@ -42,7 +42,7 @@ unsigned int CCS_ID = 1;
 /* figures for the profiling report.
 */
 static StgWord64 total_alloc;
-static lnat total_prof_ticks;
+static W_ total_prof_ticks;
 /* Globals for opening the profiling log file(s)
 */
@@ -271,11 +271,11 @@ isEmptyRetainerStack( void )
 * Returns size of stack
 * -------------------------------------------------------------------------- */
 #ifdef DEBUG
-lnat
+W_
 retainerStackBlocks( void )
 {
 bdescr* bd;
-lnat res = 0;
+W_ res = 0;
 for (bd = firstStack; bd != NULL; bd = bd->link)
 res += bd->blocks;
@@ -43,7 +43,7 @@ retainerSetOf( StgClosure *c )
 // Used by Storage.c:memInventory()
 #ifdef DEBUG
-extern lnat retainerStackBlocks ( void );
+extern W_ retainerStackBlocks ( void );
 #endif
 #include "EndPrivate.h"
@@ -1542,7 +1542,7 @@ decodeSize(const char *flag, nat offset, StgWord64 min, StgWord64 max)
 if (m < 0 || val < min || val > max) {
 // printf doesn't like 64-bit format specs on Windows
 // apparently, so fall back to unsigned long.
-errorBelch("error in RTS option %s: size outside allowed range (%" FMT_SizeT " - %" FMT_SizeT ")", flag, (lnat)min, (lnat)max);
+errorBelch("error in RTS option %s: size outside allowed range (%" FMT_SizeT " - %" FMT_SizeT ")", flag, (W_)min, (W_)max);
 stg_exit(EXIT_FAILURE);
 }
@@ -130,7 +130,7 @@ heapOverflow(void)
 {
 /* don't fflush(stdout); WORKAROUND bug in Linux glibc */
 OutOfHeapHook(0/*unknown request size*/,
-(lnat)RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE);
+(W_)RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE);
 heap_overflow = rtsTrue;
 }
@@ -1107,9 +1107,9 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
 if (cap->r.rHpAlloc > BLOCK_SIZE) {
 // if so, get one and push it on the front of the nursery.
 bdescr *bd;
-lnat blocks;
+W_ blocks;
-blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
+blocks = (W_)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
 if (blocks > BLOCKS_PER_MBLOCK) {
 barf("allocation of %ld bytes too large (GHC should have complained at compile-time)", (long)cap->r.rHpAlloc);
@@ -57,14 +57,14 @@ static Time HCe_start_time, HCe_tot_time = 0; // heap census prof elap time
 #endif
 // current = current as of last GC
-static lnat current_residency = 0; // in words; for stats only
-static lnat max_residency = 0;
-static lnat cumulative_residency = 0;
-static lnat residency_samples = 0; // for stats only
-static lnat current_slop = 0;
-static lnat max_slop = 0;
+static W_ current_residency = 0; // in words; for stats only
+static W_ max_residency = 0;
+static W_ cumulative_residency = 0;
+static W_ residency_samples = 0; // for stats only
+static W_ current_slop = 0;
+static W_ max_slop = 0;
-static lnat GC_end_faults = 0;
+static W_ GC_end_faults = 0;
 static Time *GC_coll_cpu = NULL;
 static Time *GC_coll_elapsed = NULL;
@@ -340,8 +340,8 @@ stat_gcWorkerThreadDone (gc_thread *gct STG_UNUSED)
 void
 stat_endGC (Capability *cap, gc_thread *gct,
-lnat alloc, lnat live, lnat copied, lnat slop, nat gen,
-nat par_n_threads, lnat par_max_copied, lnat par_tot_copied)
+W_ alloc, W_ live, W_ copied, W_ slop, nat gen,
+nat par_n_threads, W_ par_max_copied, W_ par_tot_copied)
 {
 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||
 RtsFlags.ProfFlags.doHeapProfile)
@@ -419,8 +419,8 @@ stat_endGC (Capability *cap, gc_thread *gct,
 * to calculate the total
 */
 {
-lnat tot_alloc = 0;
-lnat n;
+W_ tot_alloc = 0;
+W_ n;
 for (n = 0; n < n_capabilities; n++) {
 tot_alloc += capabilities[n].total_allocated;
 traceEventHeapAllocated(&capabilities[n],
@@ -627,7 +627,7 @@ stat_exit(int alloc)
 if (tot_elapsed == 0.0) tot_elapsed = 1;
 if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
-statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", "");
+statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (W_)alloc*sizeof(W_), "", "");
 statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);
 }
@@ -675,7 +675,7 @@ stat_exit(int alloc)
 statsPrintf("%16" FMT_SizeT " MB total memory in use (%" FMT_SizeT " MB lost due to fragmentation)\n\n",
 peak_mblocks_allocated * MBLOCK_SIZE_W / (1024 * 1024 / sizeof(W_)),
-(lnat)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
+(W_)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
 /* Print garbage collections in each gen */
 statsPrintf(" Tot time (elapsed) Avg pause Max pause\n");
@@ -856,9 +856,9 @@ void
 statDescribeGens(void)
 {
 nat g, mut, lge, i;
-lnat gen_slop;
-lnat tot_live, tot_slop;
-lnat gen_live, gen_blocks;
+W_ gen_slop;
+W_ tot_live, tot_slop;
+W_ gen_live, gen_blocks;
 bdescr *bd;
 generation *gen;
@@ -896,7 +896,7 @@ statDescribeGens(void)
 gen_blocks += gcThreadLiveBlocks(i,g);
 }
-debugBelch("%5d %7" FMT_SizeT " %9d", g, (lnat)gen->max_blocks, mut);
+debugBelch("%5d %7" FMT_SizeT " %9d", g, (W_)gen->max_blocks, mut);
 gen_slop = gen_blocks * BLOCK_SIZE_W - gen_live;
@@ -29,8 +29,8 @@ void stat_endInit(void);
 void stat_startGC(Capability *cap, struct gc_thread_ *gct);
 void stat_endGC (Capability *cap, struct gc_thread_ *gct,
-lnat alloc, lnat live, lnat copied, lnat slop, nat gen,
-nat n_gc_threads, lnat par_max_copied, lnat par_tot_copied);
+W_ alloc, W_ live, W_ copied, W_ slop, nat gen,
+nat n_gc_threads, W_ par_max_copied, W_ par_tot_copied);
 void stat_gcWorkerThreadStart (struct gc_thread_ *gct);
 void stat_gcWorkerThreadDone (struct gc_thread_ *gct);
@@ -247,7 +247,7 @@ tryWakeupThread (Capability *cap, StgTSO *tso)
 msg->tso = tso;
 sendMessage(cap, tso->cap, (Message*)msg);
 debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
-(lnat)tso->id, tso->cap->no);
+(W_)tso->id, tso->cap->no);
 return;
 }
 #endif
@@ -272,7 +272,7 @@ tryWakeupThread (Capability *cap, StgTSO *tso)
 unlockClosure(tso->block_info.closure, i);
 if (i != &stg_MSG_NULL_info) {
 debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
-(lnat)tso->id, tso->block_info.throwto->header.info);
+(W_)tso->id, tso->block_info.throwto->header.info);
 return;
 }
@@ -375,7 +375,7 @@ checkBlockingQueues (Capability *cap, StgTSO *tso)
 debugTraceCap(DEBUG_sched, cap,
 "collision occurred; checking blocking queues for thread %ld",
-(lnat)tso->id);
+(W_)tso->id);
 for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
 next = bq->link;
@@ -494,7 +494,7 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
 {
 StgStack *new_stack, *old_stack;
 StgUnderflowFrame *frame;
-lnat chunk_size;
+W_ chunk_size;
 IF_DEBUG(sanity,checkTSO(tso));
@@ -204,37 +204,37 @@ static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
 switch (tag) {
 case EVENT_CREATE_THREAD: // (cap, thread)
 debugBelch("cap %d: created thread %" FMT_SizeT "\n",
-cap->no, (lnat)tso->id);
+cap->no, (W_)tso->id);
 break;
 case EVENT_RUN_THREAD: // (cap, thread)
 debugBelch("cap %d: running thread %" FMT_SizeT " (%s)\n",
-cap->no, (lnat)tso->id, what_next_strs[tso->what_next]);
+cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
 break;
 case EVENT_THREAD_RUNNABLE: // (cap, thread)
 debugBelch("cap %d: thread %" FMT_SizeT " appended to run queue\n",
-cap->no, (lnat)tso->id);
+cap->no, (W_)tso->id);
 break;
 case EVENT_MIGRATE_THREAD: // (cap, thread, new_cap)
 debugBelch("cap %d: thread %" FMT_SizeT " migrating to cap %d\n",
-cap->no, (lnat)tso->id, (int)info1);
+cap->no, (W_)tso->id, (int)info1);
 break;
 case EVENT_THREAD_WAKEUP: // (cap, thread, info1_cap)
 debugBelch("cap %d: waking up thread %" FMT_SizeT " on cap %d\n",
-cap->no, (lnat)tso->id, (int)info1);
+cap->no, (W_)tso->id, (int)info1);
 break;
 case EVENT_STOP_THREAD: // (cap, thread, status)
 if (info1 == 6 + BlockedOnBlackHole) {
 debugBelch("cap %d: thread %" FMT_SizeT " stopped (blocked on black hole owned by thread %lu)\n",
-cap->no, (lnat)tso->id, (long)info2);
+cap->no, (W_)tso->id, (long)info2);
 } else {
 debugBelch("cap %d: thread %" FMT_SizeT " stopped (%s)\n",
-cap->no, (lnat)tso->id, thread_stop_reasons[info1]);
+cap->no, (W_)tso->id, thread_stop_reasons[info1]);
 }
 break;
 default:
 debugBelch("cap %d: thread %" FMT_SizeT ": event %d\n\n",
-cap->no, (lnat)tso->id, tag);
+cap->no, (W_)tso->id, tag);
 break;
 }
@@ -324,7 +324,7 @@ void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
 void traceHeapEvent_ (Capability *cap,
 EventTypeNum tag,
 CapsetID heap_capset,
-lnat info1)
+W_ info1)
 {
 #ifdef DEBUG
 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
@@ -338,10 +338,10 @@ void traceHeapEvent_ (Capability *cap,
 void traceEventHeapInfo_ (CapsetID heap_capset,
 nat gens,
-lnat maxHeapSize,
-lnat allocAreaSize,
-lnat mblockSize,
-lnat blockSize)
+W_ maxHeapSize,
+W_ allocAreaSize,
+W_ mblockSize,
+W_ blockSize)
 {
 #ifdef DEBUG
 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
@@ -358,12 +358,12 @@ void traceEventHeapInfo_ (CapsetID heap_capset,
 void traceEventGcStats_ (Capability *cap,
 CapsetID heap_capset,
 nat gen,
-lnat copied,
-lnat slop,
-lnat fragmentation,
+W_ copied,
+W_ slop,
+W_ fragmentation,
 nat par_n_threads,
-lnat par_max_copied,
-lnat par_tot_copied)
+W_ par_max_copied,
+W_ par_tot_copied)
 {
 #ifdef DEBUG
 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
@@ -423,18 +423,18 @@ void traceCapsetEvent (EventTypeNum tag,
 tracePreface();
 switch (tag) {
 case EVENT_CAPSET_CREATE: // (capset, capset_type)
-debugBelch("created capset %" FMT_SizeT " of type %d\n", (lnat)capset, (int)info);
+debugBelch("created capset %" FMT_SizeT " of type %d\n", (W_)capset, (int)info);
 break;
 case EVENT_CAPSET_DELETE: // (capset)
-debugBelch("deleted capset %" FMT_SizeT "\n", (lnat)capset);
+debugBelch("deleted capset %" FMT_SizeT "\n", (W_)capset);
 break;
 case EVENT_CAPSET_ASSIGN_CAP: // (capset, capno)
 debugBelch("assigned cap %" FMT_SizeT " to capset %" FMT_SizeT "\n",
-(lnat)info, (lnat)capset);
+(W_)info, (W_)capset);
 break;
 case EVENT_CAPSET_REMOVE_CAP: // (capset, capno)
 debugBelch("removed cap %" FMT_SizeT " from capset %" FMT_SizeT "\n",
-(lnat)info, (lnat)capset);
+(W_)info, (W_)capset);
 break;
 }
 RELEASE_LOCK(&trace_utx);
@@ -717,7 +717,7 @@ void traceThreadLabel_(Capability *cap,
 ACQUIRE_LOCK(&trace_utx);
 tracePreface();
 debugBelch("cap %d: thread %" FMT_SizeT " has label %s\n",
-cap->no, (lnat)tso->id, label);
+cap->no, (W_)tso->id, label);
 RELEASE_LOCK(&trace_utx);
 } else
 #endif
@@ -133,24 +133,24 @@ void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);
 void traceHeapEvent_ (Capability *cap,
 EventTypeNum tag,
 CapsetID heap_capset,
-lnat info1);
+W_ info1);
 void traceEventHeapInfo_ (CapsetID heap_capset,
 nat gens,
-lnat maxHeapSize,
-lnat allocAreaSize,
-lnat mblockSize,
-lnat blockSize);
+W_ maxHeapSize,
+W_ allocAreaSize,
+W_ mblockSize,
+W_ blockSize);
 void traceEventGcStats_ (Capability *cap,
 CapsetID heap_capset,
 nat gen,
-lnat copied,
-lnat slop,
-lnat fragmentation,
+W_ copied,
+W_ slop,
+W_ fragmentation,
 nat par_n_threads,
-lnat par_max_copied,
-lnat par_tot_copied);
+W_ par_max_copied,
+W_ par_tot_copied);
 /*
 * Record a spark event
@@ -642,12 +642,12 @@ INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)
 INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
 CapsetID heap_capset STG_UNUSED,
 nat gen STG_UNUSED,
-lnat copied STG_UNUSED,
-lnat slop STG_UNUSED,
-lnat fragmentation STG_UNUSED,
+W_ copied STG_UNUSED,
+W_ slop STG_UNUSED,
+W_ fragmentation STG_UNUSED,
 nat par_n_threads STG_UNUSED,
-lnat par_max_copied STG_UNUSED,
-lnat par_tot_copied STG_UNUSED)
+W_ par_max_copied STG_UNUSED,
+W_ par_tot_copied STG_UNUSED)
 {
 if (RTS_UNLIKELY(TRACE_gc)) {
 traceEventGcStats_(cap, heap_capset, gen,
@@ -661,10 +661,10 @@ INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
 INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
 nat gens STG_UNUSED,
-lnat maxHeapSize STG_UNUSED,
-lnat allocAreaSize STG_UNUSED,
-lnat mblockSize STG_UNUSED,
-lnat blockSize STG_UNUSED)
+W_ maxHeapSize STG_UNUSED,
+W_ allocAreaSize STG_UNUSED,
+W_ mblockSize STG_UNUSED,
+W_ blockSize STG_UNUSED)
 {
 if (RTS_UNLIKELY(TRACE_gc)) {
 traceEventHeapInfo_(heap_capset, gens,
@@ -678,7 +678,7 @@ INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
 INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
 CapsetID heap_capset STG_UNUSED,
-lnat allocated STG_UNUSED)
+W_ allocated STG_UNUSED)
 {
 traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
 dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
@@ -686,7 +686,7 @@ INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
 INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
 CapsetID heap_capset STG_UNUSED,
-lnat heap_size STG_UNUSED)
+W_ heap_size STG_UNUSED)
 {
 traceHeapEvent(cap, EVENT_HEAP_SIZE,