Commit e7c3f957 authored by simonmar

[project @ 2005-02-10 13:01:52 by simonmar]

GC changes: instead of threading the old-generation mutable lists
through objects in the heap, keep them in separate flat arrays
(one per generation).

This has some advantages:

  - the IND_OLDGEN object is now only 2 words, so the minimum
    size of a THUNK is now 2 words instead of 3.  This saves
    some allocation (about 2% on average, according to my
    measurements), and is friendlier to the cache, because objects
    are packed together more closely.

  - keeping the mutable list separate from the IND object
    will be necessary for our multiprocessor implementation.

  - removing the mut_link field makes the layout of some objects
    more uniform, leading to less complexity and fewer special cases.

  - I also unified the two mutable lists (mut_once_list and mut_list)
    into a single mutable list, which led to further simplifications
    in the GC.
parent 0c4c6606
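As a rough, self-contained sketch of the new scheme (illustration only, not
code from this patch): each old generation keeps a chain of fixed-size blocks
whose payload is a flat array of pointers to the recorded mutable objects.
The names below (MutBlock, Generation, record_mutable, MUT_BLOCK_ENTRIES) are
made up for the sketch; the real code is recordMutableGen in Storage.h,
further down in this diff.

    #include <stdlib.h>

    #define MUT_BLOCK_ENTRIES 1024              /* stand-in for BLOCK_SIZE_W     */

    typedef struct MutBlock_ {                  /* stand-in for a bdescr + block */
        struct MutBlock_ *link;                 /* chain of earlier, full blocks */
        size_t            free;                 /* next unused slot in entries[] */
        void             *entries[MUT_BLOCK_ENTRIES];
    } MutBlock;

    typedef struct {
        MutBlock *mut_list;                     /* corresponds to gen->mut_list  */
    } Generation;

    /* Record a mutable object in its generation's list: append a pointer to
     * the current block, allocating a fresh block when it fills up.  Nothing
     * in the object itself is written (the old scheme needed a mut_link field).
     */
    static void record_mutable(Generation *gen, void *closure)
    {
        MutBlock *bd = gen->mut_list;
        if (bd == NULL || bd->free == MUT_BLOCK_ENTRIES) {
            MutBlock *new_bd = calloc(1, sizeof(MutBlock));
            new_bd->link = bd;
            gen->mut_list = bd = new_bd;
        }
        bd->entries[bd->free++] = closure;
    }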
......@@ -345,7 +345,7 @@ mkUpdInfoLabel = RtsLabel (RtsInfo SLIT("stg_upd_frame"))
mkSeqInfoLabel = RtsLabel (RtsInfo SLIT("stg_seq_frame"))
mkIndStaticInfoLabel = RtsLabel (RtsInfo SLIT("stg_IND_STATIC"))
mkMainCapabilityLabel = RtsLabel (RtsData SLIT("MainCapability"))
mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN"))
mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN0"))
mkEMPTY_MVAR_infoLabel = RtsLabel (RtsInfo SLIT("stg_EMPTY_MVAR"))
mkTopTickyCtrLabel = RtsLabel (RtsData SLIT("top_ct"))
......
......@@ -186,7 +186,7 @@ static :: { ExtFCode [CmmStatic] }
{ do lits <- sequence $4;
return $ map CmmStaticLit $
mkStaticClosure (mkRtsInfoLabelFS $3)
dontCareCCS (map getLit lits) [] [] }
dontCareCCS (map getLit lits) [] [] [] }
-- arrays of closures required for the CHARLIKE & INTLIKE arrays
lits :: { [ExtFCode CmmExpr] }
......@@ -712,7 +712,7 @@ funInfo name ptrs nptrs cl_type desc_str ty_str fun_type = do
staticClosure :: FastString -> FastString -> [CmmLit] -> ExtCode
staticClosure cl_label info payload
= code $ emitDataLits (mkRtsDataLabelFS cl_label) lits
where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] []
where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] [] []
foreignCall
:: String
......
%
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
% $Id: CgHeapery.lhs,v 1.42 2004/11/26 16:20:09 simonmar Exp $
% $Id: CgHeapery.lhs,v 1.43 2005/02/10 13:01:53 simonmar Exp $
%
\section[CgHeapery]{Heap management functions}
......@@ -34,9 +34,8 @@ import CgTicky ( staticTickyHdr, tickyDynAlloc, tickyAllocHeap )
import CgParallel ( staticGranHdr, staticParHdr, doGranAllocate )
import CgStackery ( getFinalStackHW, getRealSp )
import CgCallConv ( mkRegLiveness )
import ClosureInfo ( closureSize, closureUpdReqd,
staticClosureNeedsLink,
mkConInfo,
import ClosureInfo ( closureSize, staticClosureNeedsLink,
mkConInfo, closureNeedsUpdSpace,
infoTableLabelFromCI, closureLabelFromCI,
nodeMustPointToIt, closureLFInfo,
ClosureInfo )
......@@ -189,26 +188,37 @@ mkStaticClosureFields
-> [CmmLit] -- Payload
-> [CmmLit] -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
= mkStaticClosure info_lbl ccs payload padding_wds static_link_field
= mkStaticClosure info_lbl ccs payload padding_wds
static_link_field saved_info_field
where
info_lbl = infoTableLabelFromCI cl_info
upd_reqd = closureUpdReqd cl_info
-- CAFs must have consistent layout, regardless of whether they
-- are actually updatable or not. The layout of a CAF is:
--
-- 3 saved_info
-- 2 static_link
-- 1 indirectee
-- 0 info ptr
--
-- the static_link and saved_info fields must always be in the same
-- place. So we use closureNeedsUpdSpace rather than
-- closureUpdReqd here:
is_caf = closureNeedsUpdSpace cl_info
-- for the purposes of laying out the static closure, we consider all
-- thunks to be "updatable", so that the static link field is always
-- in the same place.
padding_wds
| not upd_reqd = []
| otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s
| not is_caf = []
| otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s
where n = max 0 (mIN_UPD_SIZE - length payload)
-- We always have a static link field for a thunk, it's used to
-- save the closure's info pointer when we're reverting CAFs
-- (see comment in Storage.c)
static_link_field
| upd_reqd || staticClosureNeedsLink cl_info = [static_link_value]
| otherwise = []
| is_caf || staticClosureNeedsLink cl_info = [static_link_value]
| otherwise = []
saved_info_field
| is_caf = [mkIntCLit 0]
| otherwise = []
-- for a static constructor which has NoCafRefs, we set the
-- static link field to a non-zero value so the garbage
......@@ -218,13 +228,14 @@ mkStaticClosureFields cl_info ccs caf_refs payload
| otherwise = mkIntCLit 1
mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
-> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field
-> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
= [CmmLabel info_lbl]
++ variable_header_words
++ payload
++ padding_wds
++ static_link_field
++ saved_info_field
where
variable_header_words
= staticGranHdr
......
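For reference, the uniform layout this gives every static thunk (CAF), as the
RTS sees it -- illustration only: StgCafStatic_sketch is not a real RTS type,
it just spells out the slot numbering from the comment above, using the
StgHeader/StgClosure/StgInfoTable types from Closures.h.

    typedef struct {
        StgHeader           header;       /* 0: info pointer                      */
        StgClosure         *indirectee;   /* 1: padding until the CAF is updated  */
        StgClosure         *static_link;  /* 2: static link (see Storage.c)       */
        const StgInfoTable *saved_info;   /* 3: saved info ptr, for CAF reversion */
    } StgCafStatic_sketch;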
......@@ -175,7 +175,7 @@ emitPrimOp [res] DataToTagOp [arg] live
-- #define unsafeFreezzeArrayzh(r,a)
-- {
-- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN_info);
-- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN0_info);
-- r = a;
-- }
emitPrimOp [res] UnsafeFreezeArrayOp [arg] live
......
......@@ -29,7 +29,8 @@ module ClosureInfo (
closureName, infoTableLabelFromCI,
closureLabelFromCI, closureSRT,
closureLFInfo, closureSMRep, closureUpdReqd,
closureLFInfo, closureSMRep, closureUpdReqd,
closureNeedsUpdSpace,
closureSingleEntry, closureReEntrant, isConstrClosure_maybe,
closureFunInfo, isStandardFormThunk, isKnownFun,
......
/* -----------------------------------------------------------------------------
* $Id: Block.h,v 1.17 2004/08/13 13:09:09 simonmar Exp $
* $Id: Block.h,v 1.18 2005/02/10 13:02:00 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
......@@ -142,9 +142,11 @@ INLINE_HEADER bdescr *Bdescr(StgPtr p)
(1 + (W_)MBLOCK_ROUND_UP((n-BLOCKS_PER_MBLOCK) * BLOCK_SIZE) / MBLOCK_SIZE)
#ifndef CMINUSMINUS
// to the end...
/* Double-linked block lists: --------------------------------------------- */
#ifndef CMINUSMINUS
INLINE_HEADER void
dbl_link_onto(bdescr *bd, bdescr **list)
{
......@@ -155,6 +157,37 @@ dbl_link_onto(bdescr *bd, bdescr **list)
}
*list = bd;
}
#endif
/* Initialisation ---------------------------------------------------------- */
extern void initBlockAllocator(void);
/* Allocation -------------------------------------------------------------- */
extern bdescr *allocGroup(nat n);
extern bdescr *allocBlock(void);
/* De-Allocation ----------------------------------------------------------- */
extern void freeGroup(bdescr *p);
extern void freeChain(bdescr *p);
/* Round a value to megablocks --------------------------------------------- */
#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W)
INLINE_HEADER nat
round_to_mblocks(nat words)
{
if (words > WORDS_PER_MBLOCK) {
if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) {
words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK;
} else {
words = ((words / WORDS_PER_MBLOCK) + 1) * WORDS_PER_MBLOCK;
}
}
return words;
}
#endif /* !CMINUSMINUS */
#endif /* BLOCK_H */
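A quick worked illustration of round_to_mblocks above (not part of the patch).
It assumes Block.h is in scope and that WORDS_PER_MBLOCK is divisible by four;
round_to_mblocks_examples is a made-up name.

    #include <assert.h>

    static void round_to_mblocks_examples(void)
    {
        const nat W = WORDS_PER_MBLOCK;
        assert(round_to_mblocks(W / 2)     == W / 2); /* <= one megablock: unchanged */
        assert(round_to_mblocks(W + W/4)   == W);     /* remainder < W/2: round down */
        assert(round_to_mblocks(W + 3*W/4) == 2 * W); /* remainder >= W/2: round up  */
    }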
......@@ -174,16 +174,9 @@
/* These macros are optimised versions of the above for certain
* closure types. They *must* be equivalent to the generic
* STATIC_LINK.
*
* You may be surprised that the STATIC_LINK field for a THUNK_STATIC
* is at offset 2; that's because a THUNK_STATIC always has two words
* of (non-ptr) padding, to make room for the IND_STATIC that is
* going to overwrite it. It doesn't do any harm, because a
* THUNK_STATIC needs this extra word for the IND_STATIC's saved_info
* field anyhow. Hmm, this is all rather delicate. --SDM
*/
#define FUN_STATIC_LINK(p) ((p)->payload[0])
#define THUNK_STATIC_LINK(p) ((p)->payload[2])
#define THUNK_STATIC_LINK(p) ((p)->payload[1])
#define IND_STATIC_LINK(p) ((p)->payload[1])
#define STATIC_LINK2(info,p) \
......
/* ----------------------------------------------------------------------------
* $Id: ClosureTypes.h,v 1.19 2004/11/18 09:56:17 tharris Exp $
* $Id: ClosureTypes.h,v 1.20 2005/02/10 13:02:02 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
......@@ -66,9 +66,9 @@
#define MVAR 50
#define ARR_WORDS 51
#define MUT_ARR_PTRS 52
#define MUT_ARR_PTRS_FROZEN 53
#define MUT_VAR 54
#define MUT_CONS 55
#define MUT_ARR_PTRS_FROZEN0 53
#define MUT_ARR_PTRS_FROZEN 54
#define MUT_VAR 55
#define WEAK 56
#define FOREIGN 57
#define STABLE_NAME 58
......@@ -87,5 +87,5 @@
#define CATCH_RETRY_FRAME 71
#define CATCH_STM_FRAME 72
#define N_CLOSURE_TYPES 73
#endif /* CLOSURETYPES_H */
......@@ -66,19 +66,6 @@ struct StgClosure_ {
struct StgClosure_ *payload[FLEXIBLE_ARRAY];
};
/* What a stroke of luck - all our mutable closures follow the same
* basic layout, with the mutable link field as the second field after
* the header. This means the following structure is the supertype of
* mutable closures.
*/
typedef struct StgMutClosure_ {
StgHeader header;
StgWord padding;
struct StgMutClosure_ *mut_link;
struct StgClosure_ *payload[FLEXIBLE_ARRAY];
} StgMutClosure;
typedef struct {
StgHeader header;
StgClosure *selectee;
......@@ -107,12 +94,6 @@ typedef struct {
StgClosure *indirectee;
} StgInd;
typedef struct {
StgHeader header;
StgClosure *indirectee;
StgMutClosure *mut_link;
} StgIndOldGen;
typedef struct {
StgHeader header;
StgClosure *indirectee;
......@@ -129,14 +110,12 @@ typedef struct {
typedef struct {
StgHeader header;
StgWord ptrs;
StgMutClosure *mut_link; /* mutable list */
StgClosure *payload[FLEXIBLE_ARRAY];
} StgMutArrPtrs;
typedef struct {
StgHeader header;
StgClosure *var;
StgMutClosure *mut_link;
} StgMutVar;
typedef struct _StgUpdateFrame {
......@@ -303,7 +282,6 @@ typedef struct {
typedef struct {
StgHeader header;
struct StgTSO_ *head;
StgMutClosure *mut_link;
struct StgTSO_ *tail;
StgClosure* value;
} StgMVar;
......@@ -329,7 +307,6 @@ typedef struct {
typedef struct StgTVarWaitQueue_ {
StgHeader header;
struct StgTSO_ *waiting_tso;
StgMutClosure *mut_link;
struct StgTVarWaitQueue_ *next_queue_entry;
struct StgTVarWaitQueue_ *prev_queue_entry;
} StgTVarWaitQueue;
......@@ -337,7 +314,6 @@ typedef struct StgTVarWaitQueue_ {
typedef struct {
StgHeader header;
StgClosure *current_value;
StgMutClosure *mut_link;
StgTVarWaitQueue *first_wait_queue_entry;
} StgTVar;
......@@ -354,7 +330,6 @@ typedef struct {
typedef struct StgTRecChunk_ {
StgHeader header;
struct StgTRecChunk_ *prev_chunk;
StgMutClosure *mut_link;
StgWord next_entry_idx;
TRecEntry entries[TREC_CHUNK_NUM_ENTRIES];
} StgTRecChunk;
......@@ -371,7 +346,6 @@ typedef enum {
typedef struct StgTRecHeader_ {
StgHeader header;
TRecState state;
StgMutClosure *mut_link;
struct StgTRecHeader_ *enclosing_trec;
StgTRecChunk *current_chunk;
} StgTRecHeader;
......@@ -401,8 +375,7 @@ typedef struct {
of closures that can be found on a blocking queue: StgTSO, StgRBHSave,
StgBlockedFetch. (StgRBHSave can only appear at the end of a blocking
queue). Logically, this is a union type, but defining another struct
with a common layout is easier to handle in the code (same as for
StgMutClosures).
with a common layout is easier to handle in the code.
Note that in the standard setup only StgTSOs can be on a blocking queue.
This is one of the main reasons for slightly different code in files
such as Schedule.c.
......@@ -410,7 +383,6 @@ typedef struct {
typedef struct StgBlockingQueueElement_ {
StgHeader header;
struct StgBlockingQueueElement_ *link; /* next elem in BQ */
StgMutClosure *mut_link; /* next elem in mutable list */
struct StgClosure_ *payload[FLEXIBLE_ARRAY];/* contents of the closure */
} StgBlockingQueueElement;
......@@ -418,7 +390,6 @@ typedef struct StgBlockingQueueElement_ {
typedef struct StgBlockingQueue_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
StgMutClosure *mut_link; /* next elem in mutable list */
} StgBlockingQueue;
/* this closure is hanging at the end of a blocking queue in (see RBH.c) */
......@@ -430,7 +401,6 @@ typedef struct StgRBHSave_ {
typedef struct StgRBH_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
StgMutClosure *mut_link; /* next elem in mutable list */
} StgRBH;
#else
......@@ -438,7 +408,6 @@ typedef struct StgRBH_ {
typedef struct StgBlockingQueue_ {
StgHeader header;
struct StgTSO_ *blocking_queue;
StgMutClosure *mut_link;
} StgBlockingQueue;
#endif
......@@ -448,14 +417,12 @@ typedef struct StgBlockingQueue_ {
typedef struct StgFetchMe_ {
StgHeader header;
globalAddr *ga; /* ptr to unique id for a closure */
StgMutClosure *mut_link; /* next elem in mutable list */
} StgFetchMe;
/* same contents as an ordinary StgBlockingQueue */
typedef struct StgFetchMeBlockingQueue_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
StgMutClosure *mut_link; /* next elem in mutable list */
} StgFetchMeBlockingQueue;
/* This is an entry in a blocking queue. It indicates a fetch request from a
......@@ -467,7 +434,6 @@ typedef struct StgFetchMeBlockingQueue_ {
typedef struct StgBlockedFetch_ {
StgHeader header;
struct StgBlockingQueueElement_ *link; /* next elem in the BQ */
StgMutClosure *mut_link; /* next elem in mutable list */
StgClosure *node; /* node to fetch */
globalAddr ga; /* where to send the result to */
} StgBlockedFetch; /* NB: not just a ptr to a GA */
......
/* ----------------------------------------------------------------------------
* $Id: Constants.h,v 1.27 2004/11/18 09:56:19 tharris Exp $
* $Id: Constants.h,v 1.28 2005/02/10 13:02:03 simonmar Exp $
*
* (c) The GHC Team, 1998-2002
*
......@@ -21,12 +21,12 @@
/* -----------------------------------------------------------------------------
Minimum closure sizes
Here we define the minimum size for updatable closures. This must be at
least 2, to allow for cons cells and linked indirections. All updates
Here we define the minimum size for updatable closures. All updates
will be performed on closures of this size. For non-updatable closures
the minimum size is 1 to allow for a forwarding pointer.
Linked indirections are UPD_OLDGEN things: see Closures.h
When we used to keep the mutable list threaded through closures on
the heap, MIN_UPD_SIZE used to be 2. Now it's 1.
o MIN_UPD_SIZE doesn't apply to stack closures, static closures
or non-updateable objects like PAPs or CONSTRs
......@@ -42,7 +42,7 @@
o EVACUATED
-------------------------------------------------------------------------- */
#define MIN_UPD_SIZE 2
#define MIN_UPD_SIZE 1
#define MIN_NONUPD_SIZE 1
/* -----------------------------------------------------------------------------
......
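To make the size change concrete (sketch only, StgInd_sketch is a made-up
name): the closure an update overwrites a thunk with is now just a header plus
the indirectee, mirroring StgInd in Closures.h; the old StgIndOldGen needed a
third word for mut_link.

    typedef struct {
        StgHeader   header;        /* info pointer (plus any profiling header)   */
        StgClosure *indirectee;    /* the value the thunk was updated with       */
    } StgInd_sketch;               /* 2 words unprofiled, hence MIN_UPD_SIZE = 1 */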
......@@ -122,10 +122,10 @@ RTS_INFO(stg_ARR_WORDS_info);
RTS_INFO(stg_MUT_ARR_WORDS_info);
RTS_INFO(stg_MUT_ARR_PTRS_info);
RTS_INFO(stg_MUT_ARR_PTRS_FROZEN_info);
RTS_INFO(stg_MUT_ARR_PTRS_FROZEN0_info);
RTS_INFO(stg_MUT_VAR_info);
RTS_INFO(stg_END_TSO_QUEUE_info);
RTS_INFO(stg_MUT_CONS_info);
RTS_INFO(stg_END_MUT_LIST_info);
RTS_INFO(stg_catch_info);
RTS_INFO(stg_PAP_info);
RTS_INFO(stg_AP_info);
......@@ -185,7 +185,6 @@ RTS_ENTRY(stg_MUT_ARR_PTRS_FROZEN_entry);
RTS_ENTRY(stg_MUT_VAR_entry);
RTS_ENTRY(stg_END_TSO_QUEUE_entry);
RTS_ENTRY(stg_MUT_CONS_entry);
RTS_ENTRY(stg_END_MUT_LIST_entry);
RTS_ENTRY(stg_catch_entry);
RTS_ENTRY(stg_PAP_entry);
RTS_ENTRY(stg_AP_entry);
......@@ -214,7 +213,6 @@ RTS_ENTRY(stg_raise_entry);
/* closures */
RTS_CLOSURE(stg_END_TSO_QUEUE_closure);
RTS_CLOSURE(stg_END_MUT_LIST_closure);
RTS_CLOSURE(stg_NO_FINALIZER_closure);
RTS_CLOSURE(stg_dummy_ret_closure);
RTS_CLOSURE(stg_forceIO_closure);
......
......@@ -80,11 +80,10 @@ typedef struct _generation {
step * steps; /* steps */
unsigned int n_steps; /* number of steps */
unsigned int max_blocks; /* max blocks in step 0 */
StgMutClosure *mut_list; /* mut objects in this gen (not G0)*/
StgMutClosure *mut_once_list; /* objects that point to younger gens */
bdescr *mut_list; /* mut objects in this gen (not G0)*/
/* temporary use during GC: */
StgMutClosure * saved_mut_list;
bdescr *saved_mut_list;
/* stats information */
unsigned int collections;
......@@ -200,37 +199,33 @@ extern Mutex sm_mutex;
#define RELEASE_SM_LOCK
#endif
/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
* kind of lock in the SMP case?
/* ToDo: shouldn't recordMutable acquire some
* kind of lock in the SMP case? Or do we need per-processor
* mutable lists?
*/
INLINE_HEADER void
recordMutable(StgMutClosure *p)
recordMutableGen(StgClosure *p, generation *gen)
{
bdescr *bd;
#ifdef SMP
ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
#else
ASSERT(closure_MUTABLE(p));
#endif
bd = Bdescr((P_)p);
if (bd->gen_no > 0) {
p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_list;
RTS_DEREF(generations)[bd->gen_no].mut_list = p;
}
bdescr *bd;
bd = gen->mut_list;
if (bd->free >= bd->start + BLOCK_SIZE_W) {
bdescr *new_bd;
new_bd = allocBlock();
new_bd->link = bd;
bd = new_bd;
gen->mut_list = bd;
}
*bd->free++ = (StgWord)p;
}
INLINE_HEADER void
recordOldToNewPtrs(StgMutClosure *p)
recordMutable(StgClosure *p)
{
bdescr *bd;
bd = Bdescr((P_)p);
if (bd->gen_no > 0) {
p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_once_list;
RTS_DEREF(generations)[bd->gen_no].mut_once_list = p;
}
bdescr *bd;
ASSERT(closure_MUTABLE(p));
bd = Bdescr((P_)p);
if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
}
/* -----------------------------------------------------------------------------
......@@ -277,10 +272,10 @@ INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
{ return sizeofW(StgHeader) + p + np; }
INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); }
/* --------------------------------------------------------------------------
Sizes of closures
......@@ -353,14 +348,6 @@ extern bdescr * allocNursery ( bdescr *last_bd, nat blocks );
extern void resizeNursery ( nat blocks );
extern void tidyAllocateLists ( void );
/* -----------------------------------------------------------------------------
MUTABLE LISTS
A mutable list is ended with END_MUT_LIST, so that we can use NULL
as an indication that an object is not on a mutable list.
------------------------------------------------------------------------- */
#define END_MUT_LIST ((StgMutClosure *)(void *)&stg_END_MUT_LIST_closure)
/* -----------------------------------------------------------------------------
Functions from GC.c
-------------------------------------------------------------------------- */
......
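And a sketch of the consumer side, continuing the toy model at the top of this
page.  This is only a guess at the shape of the traversal -- the real loop
lives in GC.c, which is not shown in this excerpt -- with scavenge_one standing
in for the actual scavenging code.

    /* stand-in: scavenge the pointers inside one recorded object */
    static void scavenge_one(void *closure) { (void)closure; }

    static void scavenge_mutable_list(Generation *gen)
    {
        /* Detach the list first, so that objects found to be still mutable can
         * be re-recorded onto a fresh gen->mut_list while the old one is
         * walked (cf. the saved_mut_list field above).
         */
        MutBlock *old = gen->mut_list;
        gen->mut_list = NULL;

        while (old != NULL) {
            MutBlock *next = old->link;
            for (size_t i = 0; i < old->free; i++) {
                scavenge_one(old->entries[i]);
            }
            free(old);
            old = next;
        }
    }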
/* -----------------------------------------------------------------------------
* $Id: TSO.h,v 1.40 2005/01/28 12:55:53 simonmar Exp $
* $Id: TSO.h,v 1.41 2005/02/10 13:02:05 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
......@@ -126,7 +126,6 @@ typedef struct StgTSO_ {
StgHeader header;
struct StgTSO_* link; // Links threads onto blocking queues */
StgMutClosure * mut_link; // TSO's are mutable of course! */
struct StgTSO_* global_link; // Links all threads together */
StgWord16 what_next; // Values defined in Constants.h
......
......@@ -294,11 +294,8 @@ DEBUG_FILL_SLOP(StgClosure *p)
} else { \
if (info != stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
W_ __mut_once_list; \
__mut_once_list = generation(TO_W_(bdescr_gen_no(bd))) + \
OFFSET_generation_mut_once_list; \
StgMutClosure_mut_link(p1) = W_[__mut_once_list]; \
W_[__mut_once_list] = p1; \
foreign "C" recordMutableGen(p1 "ptr", \
generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
} \
StgInd_indirectee(p1) = p2; \
SET_INFO(p1, stg_IND_OLDGEN_info); \
......@@ -323,10 +320,9 @@ DEBUG_FILL_SLOP(StgClosure *p)
} else { \
if (_info != &stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
recordMutableGen(p1, &generations[bd->gen_no]); \
} \
((StgIndOldGen *)p1)->indirectee = p2; \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1, &stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
and_then; \
......@@ -362,10 +358,9 @@ updateWithPermIndirection(const StgInfoTable *info,
TICK_UPD_NEW_PERM_IND(p1);
} else {
if (info != &stg_BLACKHOLE_BQ_info) {
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
recordMutableGen(p1, &generations[bd->gen_no]);
}
((StgIndOldGen *)p1)->indirectee = p2;
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
// @LDV profiling
// We have just created a new closure.
......
......@@ -213,7 +213,7 @@ main(int argc, char *argv[])
struct_field(bdescr, link);
struct_size(generation);
struct_field(generation, mut_once_list);
struct_field(generation, mut_list);
struct_field(CostCentreStack, ccsID);
struct_field(CostCentreStack, mem_alloc);
......@@ -245,7 +245,6 @@ main(int argc, char *argv[])
closure_payload(StgArrWords, payload);
closure_field(StgTSO, link);
closure_field(StgTSO, mut_link);
closure_field(StgTSO, global_link);
closure_field(StgTSO, what_next);
closure_field(StgTSO, why_blocked);
......@@ -294,7 +293,6 @@ main(int argc, char *argv[])
closure_payload(StgAP_STACK, payload);
closure_field(StgInd, indirectee);
closure_field(StgMutClosure, mut_link);
closure_size(StgMutVar);
closure_field(StgMutVar, var);
......
......@@ -51,7 +51,8 @@ initGroup(nat n, bdescr *head)
if (n != 0) {
head->blocks = n;
head->free = head->start;
head->free = head->start;
head->link = NULL;
for (i=1, bd = head+1; i < n; i++, bd++) {
bd->free = 0;
bd->blocks = 0;
......@@ -78,9 +79,8 @@ allocGroup(nat n)
*last = bd->link;
/* no initialisation necessary - this is already a
* self-contained block group. */
#ifdef DEBUG
bd->free = bd->start; /* block isn't free now */
#endif
bd->link = NULL;
return bd;
}
if (bd->blocks > n) { /* block too big... */
......@@ -226,13 +226,12 @@ freeGroup(bdescr *p)
return;
}
#ifdef DEBUG
p->free = (void *)-1; /* indicates that this block is free */
p->step = NULL;
p->gen_no = 0;
/* fill the block group with garbage if sanity checking is on */
IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE));
#endif
/* find correct place in free list to place new group */
last = NULL;
......
......@@ -9,37 +9,6 @@
#ifndef BLOCK_ALLOC_H
#define BLOCK_ALLOC_H
/* Initialisation ---------------------------------------------------------- */
extern void initBlockAllocator(void);
/* Allocation -------------------------------------------------------------- */
extern bdescr *allocGroup(nat n);
extern bdescr *allocBlock(void);
/* De-Allocation ----------------------------------------------------------- */
extern void freeGroup(bdescr *p);
extern void freeChain(bdescr *p);
/* Round a value to megablocks --------------------------------------------- */
#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W)
INLINE_HEADER nat
round_to_mblocks(nat words)
{
if (words > WORDS_PER_MBLOCK) {
if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) {
words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK;
} else {
words = ((words / WORDS_PER_MBLOCK) + 1) * WORDS_PER_MBLOCK;
}
}
return words;
}
/* Debugging -------------------------------------------------------------- */
#ifdef DEBUG
......
......@@ -542,7 +542,6 @@ thread_obj (StgInfoTable *info, StgPtr p)
case STABLE_NAME:
case IND_PERM:
case MUT_VAR:
case MUT_CONS:
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
......@@ -582,8 +581,8 @@ thread_obj (StgInfoTable *info, StgPtr p)
case IND_OLDGEN:
case IND_OLDGEN_PERM:
thread((StgPtr)&((StgIndOldGen *)p)->indirectee);
return p + sizeofW(StgIndOldGen);
thread((StgPtr)&((StgInd *)p)->indirectee);
return p + sizeofW(StgInd);
case THUNK_SELECTOR:
{
......@@ -841,7 +840,7 @@ update_bkwd_compact( step *stp )
// Rebuild the mutable list for the old generation.
if (ip_MUTABLE(info)) {
recordMutable((StgMutClosure *)free);
recordMutable((StgClosure *)free);
}
// relocate TSOs
......@@ -868,19 +867,6 @@ update_bkwd_compact( step *stp )
return free_blocks;
}
static void
thread_mut_once_list( generation *g )
{
StgMutClosure *p, *next;
for (p = g->mut_once_list; p != END_MUT_LIST; p = next) {
next = p->mut_link;