Commit 1ed01a87 authored by Simon Marlow

Add a proper write barrier for MVars

Previously MVars were always on the mutable list of the old
generation, which meant every MVar was visited during every minor GC.
With lots of MVars hanging around, this gets expensive.  We addressed
this problem for MUT_VARs (aka IORefs) a while ago; the solution is to
use a traditional GC write barrier when the object is modified.  This
patch does the same thing for MVars.

TVars are still done the old way; they could probably benefit from the
same treatment too.
parent 53d57aa3
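
In outline, the barrier works as follows. This is a minimal C sketch of
the pattern, using the names introduced in the hunks below (dirty_MVAR,
stg_MVAR_CLEAN_info, stg_MVAR_DIRTY_info) and assuming the usual RTS
headers; the helper mark_mvar_dirty is hypothetical -- the real patch
open-codes the CLEAN test at each primop call site in PrimOps.cmm:

    /* Hypothetical helper; in the patch the CLEAN test is inlined in
       PrimOps.cmm and only the slow path calls the C barrier. */
    static void
    mark_mvar_dirty (Capability *cap, StgMVar *mvar)
    {
        if (mvar->header.info == &stg_MVAR_CLEAN_info) {
            /* first write since the last GC: record the MVar on this
               capability's mutable list (generation 0 has no mutable
               list, so dirty_MVAR only records older-generation MVars) */
            dirty_MVAR(&cap->r, (StgClosure *)mvar);
        }
        mvar->header.info = &stg_MVAR_DIRTY_info;
        /* ... the caller then updates mvar->head / tail / value ... */
    }

A clean MVar is therefore ignored by minor GCs entirely; it is only
revisited once something actually writes to it again.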
......@@ -66,32 +66,33 @@
#define BLACKHOLE 42
#define SE_BLACKHOLE 43
#define SE_CAF_BLACKHOLE 44
#define MVAR 45
#define ARR_WORDS 46
#define MUT_ARR_PTRS_CLEAN 47
#define MUT_ARR_PTRS_DIRTY 48
#define MUT_ARR_PTRS_FROZEN0 49
#define MUT_ARR_PTRS_FROZEN 50
#define MUT_VAR_CLEAN 51
#define MUT_VAR_DIRTY 52
#define WEAK 53
#define STABLE_NAME 54
#define TSO 55
#define BLOCKED_FETCH 56
#define FETCH_ME 57
#define FETCH_ME_BQ 58
#define RBH 59
#define EVACUATED 60
#define REMOTE_REF 61
#define TVAR_WATCH_QUEUE 62
#define INVARIANT_CHECK_QUEUE 63
#define ATOMIC_INVARIANT 64
#define TVAR 65
#define TREC_CHUNK 66
#define TREC_HEADER 67
#define ATOMICALLY_FRAME 68
#define CATCH_RETRY_FRAME 69
#define CATCH_STM_FRAME 70
#define N_CLOSURE_TYPES 71
#define MVAR_CLEAN 45
#define MVAR_DIRTY 46
#define ARR_WORDS 47
#define MUT_ARR_PTRS_CLEAN 48
#define MUT_ARR_PTRS_DIRTY 49
#define MUT_ARR_PTRS_FROZEN0 50
#define MUT_ARR_PTRS_FROZEN 51
#define MUT_VAR_CLEAN 52
#define MUT_VAR_DIRTY 53
#define WEAK 54
#define STABLE_NAME 55
#define TSO 56
#define BLOCKED_FETCH 57
#define FETCH_ME 58
#define FETCH_ME_BQ 59
#define RBH 60
#define EVACUATED 61
#define REMOTE_REF 62
#define TVAR_WATCH_QUEUE 63
#define INVARIANT_CHECK_QUEUE 64
#define ATOMIC_INVARIANT 65
#define TVAR 66
#define TREC_CHUNK 67
#define TREC_HEADER 68
#define ATOMICALLY_FRAME 69
#define CATCH_RETRY_FRAME 70
#define CATCH_STM_FRAME 71
#define N_CLOSURE_TYPES 72
#endif /* CLOSURETYPES_H */
......@@ -124,5 +124,8 @@ extern void performMajorGC(void);
extern HsInt64 getAllocations( void );
extern void revertCAFs( void );
extern void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
extern void dirty_MVAR(StgRegTable *reg, StgClosure *p);
extern void dirty_TSO(StgClosure *tso);
#endif /* RTSEXTERNAL_H */
......@@ -99,8 +99,8 @@ RTS_INFO(stg_EVACUATED_info);
RTS_INFO(stg_WEAK_info);
RTS_INFO(stg_DEAD_WEAK_info);
RTS_INFO(stg_STABLE_NAME_info);
RTS_INFO(stg_FULL_MVAR_info);
RTS_INFO(stg_EMPTY_MVAR_info);
RTS_INFO(stg_MVAR_CLEAN_info);
RTS_INFO(stg_MVAR_DIRTY_info);
RTS_INFO(stg_TSO_info);
RTS_INFO(stg_ARR_WORDS_info);
RTS_INFO(stg_MUT_ARR_WORDS_info);
......
......@@ -71,7 +71,8 @@ StgWord16 closure_flags[] = {
/* BLACKHOLE = */ ( _NS| _UPT ),
/* SE_BLACKHOLE = */ ( _NS| _UPT ),
/* SE_CAF_BLACKHOLE = */ ( _NS| _UPT ),
/* MVAR = */ (_HNF| _NS| _MUT|_UPT ),
/* MVAR_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
/* MVAR_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
/* ARR_WORDS = */ (_HNF| _NS| _UPT ),
/* MUT_ARR_PTRS_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
/* MUT_ARR_PTRS_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
......@@ -99,6 +100,6 @@ StgWord16 closure_flags[] = {
/* CATCH_STM_FRAME = */ ( _BTM )
};
#if N_CLOSURE_TYPES != 71
#if N_CLOSURE_TYPES != 72
#error Closure types changed: update ClosureFlags.c!
#endif
......@@ -827,7 +827,9 @@ INFO_TABLE_RET( stg_block_takemvar, RET_SMALL, "ptr" W_ unused )
stg_block_takemvar_finally
{
#ifdef THREADED_RTS
unlockClosure(R3, stg_EMPTY_MVAR_info);
unlockClosure(R3, stg_MVAR_DIRTY_info);
#else
SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
......@@ -853,7 +855,9 @@ INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, "ptr" W_ unused1, "ptr" W_ unused2
stg_block_putmvar_finally
{
#ifdef THREADED_RTS
unlockClosure(R3, stg_FULL_MVAR_info);
unlockClosure(R3, stg_MVAR_DIRTY_info);
#else
SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
......
......@@ -96,7 +96,8 @@ processHeapClosureForDead( StgClosure *c )
'inherently used' cases: do nothing.
*/
case TSO:
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
......
......@@ -646,7 +646,8 @@ typedef struct _RtsSymbolVal {
SymX(stg_CAF_BLACKHOLE_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
SymX(stg_EMPTY_MVAR_info) \
SymX(stg_MVAR_CLEAN_info) \
SymX(stg_MVAR_DIRTY_info) \
SymX(stg_IND_STATIC_info) \
SymX(stg_INTLIKE_closure) \
SymX(stg_MUT_ARR_PTRS_DIRTY_info) \
......
......@@ -1445,7 +1445,7 @@ isEmptyMVarzh_fast
{
/* args: R1 = MVar closure */
if (GET_INFO(R1) == stg_EMPTY_MVAR_info) {
if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
RET_N(1);
} else {
RET_N(0);
......@@ -1460,7 +1460,8 @@ newMVarzh_fast
ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
mvar = Hp - SIZEOF_StgMVar + WDS(1);
SET_HDR(mvar,stg_EMPTY_MVAR_info,W_[CCCS]);
SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
// MVARs start dirty: generation 0 has no mutable list
StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
......@@ -1495,11 +1496,15 @@ takeMVarzh_fast
#else
info = GET_INFO(mvar);
#endif
if (info == stg_MVAR_CLEAN_info) {
foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
}
/* If the MVar is empty, put ourselves on its blocking queue,
* and wait until we're woken up.
*/
if (info == stg_EMPTY_MVAR_info) {
if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
......@@ -1543,7 +1548,9 @@ takeMVarzh_fast
}
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_FULL_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
}
......@@ -1553,9 +1560,9 @@ takeMVarzh_fast
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_EMPTY_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_EMPTY_MVAR_info);
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
......@@ -1577,9 +1584,9 @@ tryTakeMVarzh_fast
info = GET_INFO(mvar);
#endif
if (info == stg_EMPTY_MVAR_info) {
if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_EMPTY_MVAR_info);
unlockClosure(mvar, info);
#endif
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
......@@ -1587,6 +1594,10 @@ tryTakeMVarzh_fast
RET_NP(0, stg_NO_FINALIZER_closure);
}
if (info == stg_MVAR_CLEAN_info) {
foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
}
/* we got the value... */
val = StgMVar_value(mvar);
......@@ -1616,7 +1627,9 @@ tryTakeMVarzh_fast
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_FULL_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
......@@ -1624,9 +1637,9 @@ tryTakeMVarzh_fast
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_EMPTY_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_EMPTY_MVAR_info);
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
......@@ -1647,7 +1660,11 @@ putMVarzh_fast
info = GET_INFO(mvar);
#endif
if (info == stg_FULL_MVAR_info) {
if (info == stg_MVAR_CLEAN_info) {
foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
}
if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
......@@ -1686,7 +1703,9 @@ putMVarzh_fast
}
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_EMPTY_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
......@@ -1696,9 +1715,9 @@ putMVarzh_fast
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_FULL_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_FULL_MVAR_info);
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
......@@ -1720,13 +1739,17 @@ tryPutMVarzh_fast
info = GET_INFO(mvar);
#endif
if (info == stg_FULL_MVAR_info) {
if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_FULL_MVAR_info);
unlockClosure(mvar, info);
#endif
RET_N(0);
}
if (info == stg_MVAR_CLEAN_info) {
foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
}
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
/* There are takeMVar(s) waiting: wake up the first one
......@@ -1752,7 +1775,9 @@ tryPutMVarzh_fast
}
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_EMPTY_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
......@@ -1761,9 +1786,9 @@ tryPutMVarzh_fast
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_FULL_MVAR_info);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
SET_INFO(mvar,stg_FULL_MVAR_info);
SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
......
......@@ -340,7 +340,8 @@ printClosure( StgClosure *obj )
debugBelch("MUT_ARR_PTRS_FROZEN(size=%lu)\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
break;
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
StgMVar* mv = (StgMVar*)obj;
debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value);
......
......@@ -146,7 +146,8 @@ static char *type_names[] = {
"BLACKHOLE",
"SE_BLACKHOLE",
"SE_CAF_BLACKHOLE",
"MVAR",
"MVAR_CLEAN",
"MVAR_DIRTY",
"ARR_WORDS",
"MUT_ARR_PTRS_CLEAN",
"MUT_ARR_PTRS_DIRTY",
......@@ -974,7 +975,8 @@ heapCensusChain( Census *census, bdescr *bd )
size = bco_sizeW((StgBCO *)p);
break;
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
case WEAK:
case STABLE_NAME:
case MUT_VAR_CLEAN:
......
......@@ -282,7 +282,13 @@ check_target:
// ASSUMPTION: tso->block_info must always point to a
// closure. In the threaded RTS it does.
if (get_itbl(mvar)->type != MVAR) goto retry;
switch (get_itbl(mvar)->type) {
case MVAR_CLEAN:
case MVAR_DIRTY:
break;
default:
goto retry;
}
info = lockClosure((StgClosure *)mvar);
......
......@@ -491,7 +491,8 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
// three children (fixed), no SRT
// need to push a stackElement
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
// head must be TSO and the head of a linked list of TSOs.
// Should it be a child? Seems to be yes.
*first_child = (StgClosure *)((StgMVar *)c)->head;
......@@ -804,7 +805,8 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
// three children (fixed), no SRT
// need to push a stackElement
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
if (se->info.next.step == 2) {
*c = (StgClosure *)((StgMVar *)se->c)->tail;
se->info.next.step++; // move to the next step
......@@ -1057,7 +1059,8 @@ isRetainer( StgClosure *c )
case TSO:
// mutable objects
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
......
......@@ -256,7 +256,8 @@ checkClosure( StgClosure* p )
info = get_itbl(p);
switch (info->type) {
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
......
......@@ -467,11 +467,11 @@ INFO_TABLE(stg_STABLE_NAME,0,1,STABLE_NAME,"STABLE_NAME","STABLE_NAME")
and entry code for each type.
------------------------------------------------------------------------- */
INFO_TABLE(stg_FULL_MVAR,3,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("FULL_MVAR object entered!") never returns; }
INFO_TABLE(stg_MVAR_CLEAN,3,0,MVAR_CLEAN,"MVAR","MVAR")
{ foreign "C" barf("MVAR object entered!") never returns; }
INFO_TABLE(stg_EMPTY_MVAR,3,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("EMPTY_MVAR object entered!") never returns; }
INFO_TABLE(stg_MVAR_DIRTY,3,0,MVAR_DIRTY,"MVAR","MVAR")
{ foreign "C" barf("MVAR object entered!") never returns; }
/* -----------------------------------------------------------------------------
STM
......
......@@ -644,7 +644,8 @@ thread_obj (StgInfoTable *info, StgPtr p)
return p + sizeofW(StgWeak);
}
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
thread_(&mvar->head);
......
......@@ -448,7 +448,8 @@ loop:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
return copy(q,sizeW_fromITBL(info),stp);
case CONSTR_0_1:
......
......@@ -124,6 +124,7 @@ static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
#ifdef DEBUG
nat mutlist_MUTVARS,
mutlist_MUTARRS,
mutlist_MVARS,
mutlist_OTHERS;
#endif
......@@ -637,9 +638,9 @@ GarbageCollect ( rtsBool force_major_gc )
copied += mut_list_size;
debugTrace(DEBUG_gc,
"mut_list_size: %lu (%d vars, %d arrays, %d others)",
"mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
......
......@@ -36,7 +36,7 @@ extern lnat new_blocks; // blocks allocated during this GC
extern lnat new_scavd_blocks; // ditto, but depth-first blocks
#ifdef DEBUG
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS;
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
#endif
StgClosure * isAlive(StgClosure *p);
......
......@@ -293,15 +293,23 @@ scavenge(step *stp)
q = p;
switch (info->type) {
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
rtsBool saved_eager_promotion = eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
evac_gen = saved_evac_gen;
failed_to_evac = rtsTrue; // mutable.
eager_promotion = saved_eager_promotion;
if (failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
}
p += sizeofW(StgMVar);
break;
}
......@@ -696,17 +704,25 @@ linear_scan:
q = p;
switch (info->type) {
case MVAR:
{
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
evac_gen = saved_evac_gen;
failed_to_evac = rtsTrue; // mutable.
break;
}
case MVAR_CLEAN:
case MVAR_DIRTY:
{
rtsBool saved_eager_promotion = eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
eager_promotion = saved_eager_promotion;
if (failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
}
break;
}
case FUN_2_0:
scavenge_fun_srt(info);
......@@ -1074,15 +1090,23 @@ scavenge_one(StgPtr p)
switch (info->type) {
case MVAR:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
rtsBool saved_eager_promotion = eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
evac_gen = saved_evac_gen;
failed_to_evac = rtsTrue; // mutable.
eager_promotion = saved_eager_promotion;
if (failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
}
break;
}
......@@ -1409,6 +1433,10 @@ scavenge_mutable_list(generation *gen)
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
mutlist_MUTARRS++; break;
case MVAR_CLEAN:
barf("MVAR_CLEAN on mutable list");
case MVAR_DIRTY:
mutlist_MVARS++; break;
default:
mutlist_OTHERS++; break;
}
......
......@@ -781,12 +781,15 @@ allocatePinned( nat n )
}
/* -----------------------------------------------------------------------------
Write Barriers
-------------------------------------------------------------------------- */
/*
This is the write barrier for MUT_VARs, a.k.a. IORefs. A
MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
and is put on the mutable list.
-------------------------------------------------------------------------- */
*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
......@@ -799,6 +802,23 @@ dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
}
}
/*
This is the write barrier for MVARs. An MVAR_CLEAN object is not
on the mutable list; an MVAR_DIRTY is. When written to, an
MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
The check for MVAR_CLEAN is inlined at the call site for speed;
this really does make a difference on concurrency-heavy benchmarks
such as Chameneos and cheap-concurrency.
*/
void
dirty_MVAR(StgRegTable *reg, StgClosure *p)
{
Capability *cap = regTableToCapability(reg);
bdescr *bd;
bd = Bdescr((StgPtr)p);
if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
}
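/* For reference, the inlined check mentioned above looks like this at
 * the PrimOps.cmm call sites earlier in this patch (quoted from the
 * hunks above, not new code added here):
 *
 *     if (info == stg_MVAR_CLEAN_info) {
 *         foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
 *     }
 */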
/* -----------------------------------------------------------------------------
Allocation functions for GMP.
......