Commit bf2d58c2 authored by Simon Marlow

Lots of nat -> StgWord changes

parent 0550bcbf
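Editorial note on the motivation: in the RTS, `nat` is an `unsigned int` (32 bits even on 64-bit platforms), while `W_`/`StgWord` tracks the machine word. Sizes and block counts held in a `nat` can therefore silently truncate on large heaps. A minimal sketch of the failure mode, with stand-in typedefs (the real definitions live in the RTS headers):

```c
#include <stdint.h>
#include <stdio.h>

typedef unsigned int nat;  /* assumption: mirrors the old RTS typedef */
typedef uintptr_t    W_;   /* assumption: stands in for StgWord here  */

int main(void)
{
    /* On a 64-bit host, a word count just over 2^32... */
    W_  words     = ((W_)1 << 32) + 42;
    nat truncated = (nat)words;           /* wraps to 42: the bug class */
    printf("W_ = %llu, nat = %u\n", (unsigned long long)words, truncated);
    return 0;
}
```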
@@ -22,17 +22,17 @@
 //
 // Creating threads
 //
-StgTSO *createThread (Capability *cap, nat stack_size);
+StgTSO *createThread (Capability *cap, W_ stack_size);
 void scheduleWaitThread (/* in    */ StgTSO *tso,
                          /* out   */ HaskellObj* ret,
                          /* inout */ Capability **cap);
-StgTSO *createGenThread (Capability *cap, nat stack_size,
+StgTSO *createGenThread (Capability *cap, W_ stack_size,
                          StgClosure *closure);
-StgTSO *createIOThread (Capability *cap, nat stack_size,
+StgTSO *createIOThread (Capability *cap, W_ stack_size,
                         StgClosure *closure);
-StgTSO *createStrictIOThread (Capability *cap, nat stack_size,
+StgTSO *createStrictIOThread (Capability *cap, W_ stack_size,
                               StgClosure *closure);
 // Suspending/resuming threads around foreign calls
......
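For orientation, these entry points are driven from the RTS API layer; a sketch modeled on `rts_evalIO` (the scaffolding here is illustrative, not the exact RTS source):

```c
#include "Rts.h"   /* Capability, StgTSO, HaskellObj, RtsFlags */

/* Create a thread to run the IO action `p` strictly, then let the
 * scheduler run it to completion, writing the result into *ret. */
static void eval_io_sketch (Capability **cap, HaskellObj p, HaskellObj *ret)
{
    StgTSO *tso;
    /* stack_size is now W_, so a stack request no longer narrows
     * through a 32-bit parameter on 64-bit platforms. */
    tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    scheduleWaitThread(tso, ret, cap);
}
```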
@@ -244,11 +244,11 @@ extern void initBlockAllocator(void);
 /* Allocation -------------------------------------------------------------- */
-bdescr *allocGroup(nat n);
+bdescr *allocGroup(W_ n);
 bdescr *allocBlock(void);
 // versions that take the storage manager lock for you:
-bdescr *allocGroup_lock(nat n);
+bdescr *allocGroup_lock(W_ n);
 bdescr *allocBlock_lock(void);
 /* De-Allocation ----------------------------------------------------------- */
......
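A hedged usage sketch for the block allocator API above (hypothetical caller; `freeGroup_lock` is the matching release):

```c
#include "Rts.h"   /* bdescr, BLOCK_SIZE_W, allocGroup_lock, freeGroup_lock */

/* Allocate a contiguous group of nblocks blocks, zero the first word
 * of each block, and release the group again. The _lock variants take
 * the storage manager lock internally. */
static void block_group_sketch (W_ nblocks)
{
    W_ i;
    bdescr *bd = allocGroup_lock(nblocks);
    for (i = 0; i < nblocks; i++) {
        bd->start[i * BLOCK_SIZE_W] = 0;   /* first word of block i */
    }
    freeGroup_lock(bd);
}
```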
@@ -124,13 +124,13 @@ extern generation * oldest_gen;
 /* -----------------------------------------------------------------------------
    Generic allocation
-   StgPtr allocate(Capability *cap, nat n)
+   StgPtr allocate(Capability *cap, W_ n)
                                 Allocates memory from the nursery in
                                 the current Capability.  This can be
                                 done without taking a global lock,
                                 unlike allocate().
-   StgPtr allocatePinned(Capability *cap, nat n)
+   StgPtr allocatePinned(Capability *cap, W_ n)
                                 Allocates a chunk of contiguous store
                                 n words long, which is at a fixed
                                 address (won't be moved by GC).
@@ -153,11 +153,11 @@ StgPtr allocate ( Capability *cap, W_ n );
 StgPtr allocatePinned ( Capability *cap, W_ n );
 /* memory allocator for executable memory */
-void * allocateExec(unsigned int len, void **exec_addr);
+void * allocateExec(W_ len, void **exec_addr);
 void freeExec (void *p);
 // Used by GC checks in external .cmm code:
-extern nat large_alloc_lim;
+extern W_ large_alloc_lim;
 /* -----------------------------------------------------------------------------
    Performing Garbage Collection
......
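A usage sketch for the two allocators documented above; in both cases the size argument counts words, and is now word-sized itself:

```c
#include "Rts.h"

/* Allocate an uninitialised n-word object from cap's nursery (no
 * global lock needed), and a pinned one that the GC will never move,
 * e.g. for memory handed to foreign code. Callers must write an
 * info-table header into the object before the next GC can run. */
static void alloc_sketch (Capability *cap, W_ nwords)
{
    StgPtr p = allocate(cap, nwords);
    StgPtr q = allocatePinned(cap, nwords);
    (void)p; (void)q;
}
```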
@@ -380,7 +380,7 @@ INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
 }
 StgTSO *
-createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
+createGenThread (Capability *cap, W_ stack_size, StgClosure *closure)
 {
     StgTSO *t;
     t = createThread (cap, stack_size);
@@ -390,7 +390,7 @@ createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
 }
 StgTSO *
-createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
+createIOThread (Capability *cap, W_ stack_size, StgClosure *closure)
 {
     StgTSO *t;
     t = createThread (cap, stack_size);
@@ -406,7 +406,7 @@ createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
 */
 StgTSO *
-createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
+createStrictIOThread(Capability *cap, W_ stack_size, StgClosure *closure)
 {
     StgTSO *t;
     t = createThread(cap, stack_size);
......
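The elided bodies of these three wrappers all follow one pattern: create a bare TSO, then push a few words onto its stack so that, when scheduled, the thread enters the closure. A sketch of the IO flavour (the exact info tables pushed differ slightly between the variants):

```c
StgTSO *
create_io_thread_sketch (Capability *cap, W_ stack_size, StgClosure *closure)
{
    StgTSO *t = createThread(cap, stack_size);
    pushClosure(t, (W_)&stg_ap_v_info);   /* apply to the RealWorld token */
    pushClosure(t, (W_)closure);          /* the IO action to run */
    pushClosure(t, (W_)&stg_enter_info);  /* enter the closure first */
    return t;
}
```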
@@ -57,7 +57,7 @@ static StgThreadID next_thread_id = 1;
    currently pri (priority) is only used in a GRAN setup -- HWL
    ------------------------------------------------------------------------ */
 StgTSO *
-createThread(Capability *cap, nat size)
+createThread(Capability *cap, W_ size)
 {
     StgTSO *tso;
     StgStack *stack;
@@ -586,7 +586,7 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
 {
     StgWord *sp;
-    nat chunk_words, size;
+    W_ chunk_words, size;
     // find the boundary of the chunk of old stack we're going to
     // copy to the new stack.  We skip over stack frames until we
@@ -659,7 +659,7 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
    Stack underflow - called from the stg_stack_underflow_info frame
    ------------------------------------------------------------------------ */
-nat // returns offset to the return address
+W_ // returns offset to the return address
 threadStackUnderflow (Capability *cap, StgTSO *tso)
 {
     StgStack *new_stack, *old_stack;
@@ -681,7 +681,7 @@ threadStackUnderflow (Capability *cap, StgTSO *tso)
     if (retvals != 0)
     {
         // we have some return values to copy to the old stack
-        if ((nat)(new_stack->sp - new_stack->stack) < retvals)
+        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
         {
             barf("threadStackUnderflow: not enough space for return values");
         }
......
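One subtlety in the underflow check above: `new_stack->sp - new_stack->stack` is a signed pointer difference, while `retvals` is unsigned, so the difference is cast to the word type to keep the comparison unsigned on both sides. Stripped of RTS context:

```c
#include <stdint.h>

typedef uintptr_t W_;   /* assumption: stands in for StgWord */
typedef W_ *StgPtr;

/* Is there room on the stack, counted in words, for `retvals`
 * return values? The explicit cast avoids a signed/unsigned
 * comparison between ptrdiff_t and the unsigned count. */
static int has_room (StgPtr sp, StgPtr base, W_ retvals)
{
    return (W_)(sp - base) >= retvals;
}
```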
@@ -40,7 +40,7 @@ StgBool isThreadBound (StgTSO* tso);
 // Overflow/underflow
 void threadStackOverflow (Capability *cap, StgTSO *tso);
-nat threadStackUnderflow (Capability *cap, StgTSO *tso);
+W_ threadStackUnderflow (Capability *cap, StgTSO *tso);
 #ifdef DEBUG
 void printThreadBlockage (StgTSO *tso);
......
@@ -168,7 +168,7 @@ STATIC_INLINE void
 initGroup(bdescr *head)
 {
   bdescr *bd;
-  nat i, n;
+  W_ i, n;
   n = head->blocks;
   head->free = head->start;
@@ -184,9 +184,9 @@ initGroup(bdescr *head)
 // usually small, and MAX_FREE_LIST is also small, so the loop version
 // might well be the best choice here.
 STATIC_INLINE nat
-log_2_ceil(nat n)
+log_2_ceil(W_ n)
 {
-    nat i, x;
+    W_ i, x;
     x = 1;
     for (i=0; i < MAX_FREE_LIST; i++) {
         if (x >= n) return i;
@@ -196,9 +196,9 @@ log_2_ceil(nat n)
 }
 STATIC_INLINE nat
-log_2(nat n)
+log_2(W_ n)
 {
-    nat i, x;
+    W_ i, x;
     x = n;
     for (i=0; i < MAX_FREE_LIST; i++) {
         x = x >> 1;
@@ -244,7 +244,7 @@ setup_tail (bdescr *bd)
 // Take a free block group bd, and split off a group of size n from
 // it. Adjust the free list as necessary, and return the new group.
 static bdescr *
-split_free_block (bdescr *bd, nat n, nat ln)
+split_free_block (bdescr *bd, W_ n, nat ln)
 {
     bdescr *fg; // free group
@@ -311,7 +311,7 @@ alloc_mega_group (nat mblocks)
 }
 bdescr *
-allocGroup (nat n)
+allocGroup (W_ n)
 {
     bdescr *bd, *rem;
     nat ln;
@@ -400,7 +400,7 @@ finish:
 // single compile.
 //
 bdescr *
-allocLargeChunk (nat min, nat max)
+allocLargeChunk (W_ min, W_ max)
 {
     bdescr *bd;
     nat ln, lnmax;
@@ -441,7 +441,7 @@ allocLargeChunk (nat min, nat max)
 }
 bdescr *
-allocGroup_lock(nat n)
+allocGroup_lock(W_ n)
 {
     bdescr *bd;
     ACQUIRE_SM_LOCK;
@@ -653,10 +653,10 @@ initMBlock(void *mblock)
    Stats / metrics
    -------------------------------------------------------------------------- */
-nat
+W_
 countBlocks(bdescr *bd)
 {
-    nat n;
+    W_ n;
     for (n=0; bd != NULL; bd=bd->link) {
         n += bd->blocks;
     }
@@ -668,10 +668,10 @@ countBlocks(bdescr *bd)
 // that would be taken up by block descriptors in the second and
 // subsequent megablock.  This is so we can tally the count with the
 // number of blocks allocated in the system, for memInventory().
-nat
+W_
 countAllocdBlocks(bdescr *bd)
 {
-    nat n;
+    W_ n;
     for (n=0; bd != NULL; bd=bd->link) {
         n += bd->blocks;
         // hack for megablock groups: see (*1) above
@@ -806,7 +806,7 @@ checkFreeListSanity(void)
     }
 }
-nat /* BLOCKS */
+W_ /* BLOCKS */
 countFreeList(void)
 {
     bdescr *bd;
......
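A standalone rendering of the free-list bucketing logic changed above, with stand-in definitions (the RTS derives its own `MAX_FREE_LIST`): `log_2_ceil(n)` finds the smallest `i` with `2^i >= n`, i.e. the first free-list bucket whose block groups are guaranteed large enough for a request of `n` blocks.

```c
#include <stdint.h>

typedef uintptr_t W_;     /* assumption: stands in for StgWord */
#define MAX_FREE_LIST 9   /* illustrative constant for the sketch */

static unsigned int log_2_ceil_sketch (W_ n)
{
    W_ i, x = 1;
    for (i = 0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return (unsigned int)i;
        x = x << 1;
    }
    return MAX_FREE_LIST;  /* request too large for any bucket */
}
/* worked values: n=1 -> 0, n=2 -> 1, n=3 -> 2, n=8 -> 3, n=9 -> 4 */
```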
@@ -11,17 +11,17 @@
 #include "BeginPrivate.h"
-bdescr *allocLargeChunk (nat min, nat max);
+bdescr *allocLargeChunk (W_ min, W_ max);
 /* Debugging  -------------------------------------------------------------- */
-extern nat countBlocks       (bdescr *bd);
-extern nat countAllocdBlocks (bdescr *bd);
+extern W_ countBlocks       (bdescr *bd);
+extern W_ countAllocdBlocks (bdescr *bd);
 extern void returnMemoryToOS(nat n);
 #ifdef DEBUG
 void checkFreeListSanity(void);
-nat countFreeList(void);
+W_ countFreeList(void);
 void markBlocks (bdescr *bd);
 void reportUnmarkedBlocks (void);
 #endif
......
@@ -183,7 +183,7 @@ loop:
 // A word-aligned memmove will be faster for small objects than libc's or gcc's.
 // Remember, the two regions *might* overlap, but: to <= from.
 STATIC_INLINE void
-move(StgPtr to, StgPtr from, nat size)
+move(StgPtr to, StgPtr from, W_ size)
 {
     for(; size > 0; --size) {
         *to++ = *from++;
@@ -225,9 +225,9 @@ thread_static( StgClosure* p )
 }
 STATIC_INLINE void
-thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
+thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, W_ size )
 {
-    nat i, b;
+    W_ i, b;
     StgWord bitmap;
     b = 0;
@@ -252,7 +252,7 @@ thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
 {
     StgPtr p;
     StgWord bitmap;
-    nat size;
+    W_ size;
     p = (StgPtr)args;
     switch (fun_info->f.fun_type) {
@@ -287,7 +287,7 @@ thread_stack(StgPtr p, StgPtr stack_end)
 {
     const StgRetInfoTable* info;
     StgWord bitmap;
-    nat size;
+    W_ size;
     // highly similar to scavenge_stack, but we do pointer threading here.
@@ -846,7 +846,7 @@ update_fwd_compact( bdescr *blocks )
     }
 }
-static nat
+static W_
 update_bkwd_compact( generation *gen )
 {
     StgPtr p, free;
@@ -855,7 +855,7 @@ update_bkwd_compact( generation *gen )
 #endif
     bdescr *bd, *free_bd;
     StgInfoTable *info;
-    nat size, free_blocks;
+    W_ size, free_blocks;
     StgWord iptr;
     bd = free_bd = gen->old_blocks;
@@ -937,7 +937,7 @@ update_bkwd_compact( generation *gen )
 void
 compact(StgClosure *static_objects)
 {
-    nat n, g, blocks;
+    W_ n, g, blocks;
     generation *gen;
     // 1. thread the roots
......
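The `move()` helper in the first hunk above is a plain word-at-a-time copy; because the compacting collector only slides objects downward (`to <= from`), a forward copy is safe even when the regions overlap. A minimal demonstration outside the RTS, with stand-in typedefs:

```c
#include <assert.h>
#include <stdint.h>

typedef uintptr_t StgWord;  /* assumption: stands in for the RTS type */
typedef StgWord  *StgPtr;

/* Same loop as move() above: word-at-a-time forward copy. */
static void move_sketch (StgPtr to, StgPtr from, StgWord size)
{
    for (; size > 0; --size) {
        *to++ = *from++;
    }
}

int main (void)
{
    StgWord buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    /* overlapping slide-down, to <= from, as in compaction */
    move_sketch(buf, buf + 2, 6);
    assert(buf[0] == 2 && buf[5] == 7);
    return 0;
}
```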
@@ -488,7 +488,7 @@ GarbageCollect (nat collect_gen,
       // Count the mutable list as bytes "copied" for the purposes of
       // stats.  Every mutable list is copied during every GC.
       if (g > 0) {
-          nat mut_list_size = 0;
+          W_ mut_list_size = 0;
           for (n = 0; n < n_capabilities; n++) {
               mut_list_size += countOccupied(capabilities[n].mut_lists[g]);
           }
@@ -710,7 +710,7 @@ GarbageCollect (nat collect_gen,
   ACQUIRE_SM_LOCK;
   if (major_gc) {
-      nat need, got;
+      W_ need, got;
       need = BLOCKS_TO_MBLOCKS(n_alloc_blocks);
       got = mblocks_allocated;
       /* If the amount of data remains constant, next major GC we'll
@@ -1511,8 +1511,8 @@ resize_generations (void)
     if (major_gc && RtsFlags.GcFlags.generations > 1) {
         W_ live, size, min_alloc, words;
-        const nat max  = RtsFlags.GcFlags.maxHeapSize;
-        const nat gens = RtsFlags.GcFlags.generations;
+        const W_ max  = RtsFlags.GcFlags.maxHeapSize;
+        const W_ gens = RtsFlags.GcFlags.generations;
         // live in the oldest generations
         if (oldest_gen->live_estimate != 0) {
@@ -1608,7 +1608,7 @@ resize_nursery (void)
   if (RtsFlags.GcFlags.generations == 1)
     {   // Two-space collector:
-      nat blocks;
+      W_ blocks;
       /* set up a new nursery.  Allocate a nursery size based on a
        * function of the amount of live data (by default a factor of 2)
@@ -1703,7 +1703,7 @@ resize_nursery (void)
           blocks = min_nursery;
       }
-      resizeNurseries((nat)blocks);
+      resizeNurseries((W_)blocks);
     }
   else
     {
......
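The two-space branch of `resize_nursery` sizes the next nursery from the amount of live data; a sketch of that policy as stated in the diff's own comment (factor of 2 by default), with the clamp visible in the last hunk. The helper name and the fixed factor are illustrative; the real code reads the factor from the RTS flags.

```c
#include <stdint.h>

typedef uintptr_t W_;   /* assumption: stands in for StgWord */

/* All counts are in blocks, and all W_ after this commit. */
static W_ two_space_nursery_blocks (W_ live_blocks, W_ min_nursery)
{
    W_ blocks = live_blocks * 2;   /* "a factor of 2", per the comment */
    if (blocks < min_nursery) {
        blocks = min_nursery;      /* the clamp before resizeNurseries() */
    }
    return blocks;
}
```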
@@ -41,8 +41,8 @@ StgClosure *caf_list = NULL;
 StgClosure *revertible_caf_list = NULL;
 rtsBool keepCAFs;
-nat large_alloc_lim;    /* GC if n_large_blocks in any nursery
-                         * reaches this. */
+W_ large_alloc_lim;     /* GC if n_large_blocks in any nursery
+                         * reaches this. */
 bdescr *exec_block;
@@ -425,10 +425,10 @@ newDynCAF (StgRegTable *reg STG_UNUSED, StgClosure *caf, StgClosure *bh)
    -------------------------------------------------------------------------- */
 static bdescr *
-allocNursery (bdescr *tail, nat blocks)
+allocNursery (bdescr *tail, W_ blocks)
 {
     bdescr *bd = NULL;
-    nat i, n;
+    W_ i, n;
     // We allocate the nursery as a single contiguous block and then
     // divide it into single blocks manually.  This way we guarantee
@@ -541,10 +541,10 @@ countNurseryBlocks (void)
 }
 static void
-resizeNursery (nursery *nursery, nat blocks)
+resizeNursery (nursery *nursery, W_ blocks)
 {
   bdescr *bd;
-  nat nursery_blocks;
+  W_ nursery_blocks;
   nursery_blocks = nursery->n_blocks;
   if (nursery_blocks == blocks) return;
@@ -584,7 +584,7 @@ resizeNursery (nursery *nursery, nat blocks)
 // Resize each of the nurseries to the specified size.
 //
 void
-resizeNurseriesFixed (nat blocks)
+resizeNurseriesFixed (W_ blocks)
 {
     nat i;
     for (i = 0; i < n_capabilities; i++) {
@@ -596,7 +596,7 @@ resizeNurseriesFixed (nat blocks)
 // Resize the nurseries to the total specified size.
 //
 void
-resizeNurseries (nat blocks)
+resizeNurseries (W_ blocks)
 {
     // If there are multiple nurseries, then we just divide the number
     // of available blocks between them.
@@ -1096,7 +1096,7 @@ calcNeeded (rtsBool force_major, memcount *blocks_needed)
 // because it knows how to work around the restrictions put in place
 // by SELinux.
-void *allocateExec (nat bytes, void **exec_ret)
+void *allocateExec (W_ bytes, void **exec_ret)
 {
     void **ret, **exec;
     ACQUIRE_SM_LOCK;
@@ -1120,10 +1120,10 @@ void freeExec (void *addr)
 #else
-void *allocateExec (nat bytes, void **exec_ret)
+void *allocateExec (W_ bytes, void **exec_ret)
 {
     void *ret;
-    nat n;
+    W_ n;
     ACQUIRE_SM_LOCK;
......
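The comment in `allocNursery` above describes the scheme: grab one contiguous group, then carve its block descriptors into a chain of single blocks by hand. A simplified sketch of that idea; the real code also chunks large requests at the megablock boundary and handles allocation failure, both omitted here.

```c
#include "Rts.h"

static bdescr *
alloc_nursery_sketch (bdescr *tail, W_ blocks)   /* SM lock assumed held */
{
    W_ i;
    bdescr *bd = allocGroup(blocks);      /* one contiguous group */
    for (i = 0; i < blocks; i++) {
        bd[i].blocks = 1;                 /* each descriptor: one block */
        bd[i].free   = bd[i].start;
        bd[i].link   = (i + 1 < blocks) ? &bd[i + 1] : tail;
    }
    return bd;
}
```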
@@ -37,7 +37,7 @@ doYouWantToGC( Capability *cap )
 }
 /* for splitting blocks groups in two */
-bdescr * splitLargeBlock (bdescr *bd, nat blocks);
+bdescr * splitLargeBlock (bdescr *bd, W_ blocks);
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
@@ -81,10 +81,10 @@ void dirty_MVAR(StgRegTable *reg, StgClosure *p);
 extern nursery *nurseries;
 void resetNurseries       ( void );
-W_   clearNursery         ( Capability *cap );
-void resizeNurseries      ( nat blocks );
-void resizeNurseriesFixed ( nat blocks );
-W_   countNurseryBlocks   ( void );
+W_   clearNursery         ( Capability *cap );
+void resizeNurseries      ( W_ blocks );
+void resizeNurseriesFixed ( W_ blocks );
+W_   countNurseryBlocks   ( void );
 /* -----------------------------------------------------------------------------
    Stats 'n' DEBUG stuff
......
@@ -23,7 +23,7 @@ sweep(generation *gen)
 {
     bdescr *bd, *prev, *next;
     nat i;
-    nat freed, resid, fragd, blocks, live;
+    W_ freed, resid, fragd, blocks, live;
     ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);
......