Commit 0417404f authored by Simon Marlow

Fix #650: use a card table to mark dirty sections of mutable arrays

The card table is an array of bytes, placed directly following the
actual array data.  This means that array reading is unaffected, but
array writing needs to read the array size from the header in order to
find the card table.

We use a bytemap rather than a bitmap, because updating the card table
must be multi-thread safe.  Each byte refers to 128 entries of the
array, but this is tunable by changing the constant
MUT_ARR_PTRS_CARD_BITS in includes/Constants.h.
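
For illustration only (not part of the commit): a standalone C sketch of the byte the write barrier dirties, assuming MUT_ARR_PTRS_CARD_BITS = 7, a hypothetical header size of `hdr_words` words, and a 64-bit word size in the example.

    #include <assert.h>
    #include <stddef.h>

    #define CARD_BITS 7   /* stands in for MUT_ARR_PTRS_CARD_BITS */

    /* Byte offset, from the start of the closure, of the card byte that
     * covers element i of an array with 'ptrs' elements.  The card table
     * sits directly after the element words. */
    static size_t card_byte_offset(size_t hdr_words, size_t ptrs,
                                   size_t i, size_t word_size)
    {
        return (hdr_words + ptrs) * word_size   /* skip header + elements  */
             + (i >> CARD_BITS);                /* one byte per 128 elements */
    }

    int main(void)
    {
        /* Example: a 1000-element array with a (hypothetical) 3-word header.
         * Writing element 200 dirties the second card byte. */
        assert(card_byte_offset(3, 1000, 200, 8) == (3 + 1000) * 8 + 1);
        return 0;
    }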
parent fb783aea
@@ -571,9 +571,21 @@ doWriteByteArrayOp _ _ _ _
 doWritePtrArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> Code
 doWritePtrArrayOp addr idx val
-  = do stmtC (setInfo addr (CmmLit (CmmLabel mkMAP_DIRTY_infoLabel)))
-       mkBasicIndexedWrite arrPtrsHdrSize Nothing bWord addr idx val
+  = do mkBasicIndexedWrite arrPtrsHdrSize Nothing bWord addr idx val
+       stmtC (setInfo addr (CmmLit (CmmLabel mkMAP_DIRTY_infoLabel)))
+       -- the write barrier.  We must write a byte into the mark table:
+       -- bits8[a + header_size + StgMutArrPtrs_size(a) + x >> N]
+       stmtC $ CmmStore (
+         cmmOffsetExpr
+          (cmmOffsetExprW (cmmOffsetB addr arrPtrsHdrSize)
+                          (loadArrPtrsSize addr))
+          (CmmMachOp mo_wordUShr [idx,
+                                  CmmLit (mkIntCLit mUT_ARR_PTRS_CARD_BITS)])
+         ) (CmmLit (CmmInt 1 W8))
+
+loadArrPtrsSize :: CmmExpr -> CmmExpr
+loadArrPtrsSize addr = CmmLoad (cmmOffsetB addr off) bWord
+ where off = fixedHdrSize*wORD_SIZE + oFFSET_StgMutArrPtrs_ptrs

 mkBasicIndexedRead :: ByteOff -> Maybe MachOp -> CmmType
                    -> LocalReg -> CmmExpr -> CmmExpr -> Code
......
@@ -463,6 +463,9 @@
 #define StgFunInfoExtra_bitmap(i)   StgFunInfoExtraFwd_bitmap(i)
 #endif

+#define mutArrPtrsCardWords(n) \
+    ROUNDUP_BYTES_TO_WDS(((n) + (1 << MUT_ARR_PTRS_CARD_BITS) - 1) >> MUT_ARR_PTRS_CARD_BITS)
+
 /* -----------------------------------------------------------------------------
    Voluntary Yields/Blocks
......
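
As a concrete illustration of the rounding above (my own numbers, not from the patch): with MUT_ARR_PTRS_CARD_BITS = 7, an array of n elements needs ceil(n/128) card bytes, rounded up to whole words. A small C check, assuming a 64-bit word:

    #include <assert.h>
    #include <stddef.h>

    #define CARD_BITS 7
    #define WORD_SIZE 8   /* assumption: 64-bit */

    static size_t card_bytes(size_t n)  /* one byte per 128 elements, rounded up */
    {
        return (n + (1u << CARD_BITS) - 1) >> CARD_BITS;
    }

    static size_t card_words(size_t n)  /* card bytes rounded up to whole words */
    {
        return (card_bytes(n) + WORD_SIZE - 1) / WORD_SIZE;
    }

    int main(void)
    {
        assert(card_bytes(1)    == 1 && card_words(1)    == 1);
        assert(card_bytes(128)  == 1 && card_words(128)  == 1);
        assert(card_bytes(129)  == 2 && card_words(129)  == 1);
        assert(card_bytes(1000) == 8 && card_words(1000) == 1);
        assert(card_bytes(1025) == 9 && card_words(1025) == 2);
        return 0;
    }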
@@ -62,6 +62,9 @@ mIN_CHARLIKE, mAX_CHARLIKE :: Int
 mIN_CHARLIKE = MIN_CHARLIKE
 mAX_CHARLIKE = MAX_CHARLIKE

+mUT_ARR_PTRS_CARD_BITS :: Int
+mUT_ARR_PTRS_CARD_BITS = MUT_ARR_PTRS_CARD_BITS
+
 -- A section of code-generator-related MAGIC CONSTANTS.
 mAX_Vanilla_REG :: Int
......
@@ -279,6 +279,7 @@ main(int argc, char *argv[])
     closure_size(StgMutArrPtrs);
     closure_field(StgMutArrPtrs, ptrs);
+    closure_field(StgMutArrPtrs, size);

     closure_size(StgArrWords);
     closure_field(StgArrWords, words);
......
@@ -66,6 +66,13 @@
 #define MAX_CHARLIKE    255
 #define MIN_CHARLIKE    0

+/* Each byte in the card table for an StgMutArrPtrs covers
+ * (1<<MUT_ARR_PTRS_CARD_BITS) elements in the array.  To find a good
+ * value for this, I used the benchmarks nofib/gc/hash,
+ * nofib/gc/graph, and nofib/gc/gc_bench.
+ */
+#define MUT_ARR_PTRS_CARD_BITS 7
+
 /* -----------------------------------------------------------------------------
    STG Registers.
......
@@ -278,7 +278,7 @@ INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
 { return sizeofW(StgArrWords) + x->words; }

 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
-{ return sizeofW(StgMutArrPtrs) + x->ptrs; }
+{ return sizeofW(StgMutArrPtrs) + x->size; }

 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
 { return TSO_STRUCT_SIZEW + tso->stack_size; }

@@ -392,4 +392,32 @@ INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
     }
 }

+/* -----------------------------------------------------------------------------
+   StgMutArrPtrs macros
+
+   An StgMutArrPtrs has a card table to indicate which elements are
+   dirty for the generational GC.  The card table is an array of
+   bytes, where each byte covers (1 << MUT_ARR_PTRS_CARD_BITS)
+   elements.  The card table is directly after the array data itself.
+   -------------------------------------------------------------------------- */
+
+// The number of card bytes needed
+INLINE_HEADER lnat mutArrPtrsCards (lnat elems)
+{
+    return (lnat)((elems + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
+                      >> MUT_ARR_PTRS_CARD_BITS);
+}
+
+// The number of words in the card table
+INLINE_HEADER lnat mutArrPtrsCardTableSize (lnat elems)
+{
+    return ROUNDUP_BYTES_TO_WDS(mutArrPtrsCards(elems));
+}
+
+// The address of the card for a particular card number
+INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, lnat n)
+{
+    return ((StgWord8 *)&(a->payload[a->ptrs]) + n);
+}
+
 #endif /* RTS_STORAGE_CLOSUREMACROS_H */
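
A hedged sketch of how these helpers compose, purely for illustration (not code from the commit); it assumes the RTS headers ("Rts.h") are in scope so that StgMutArrPtrs and the inline functions above exist.

    #include "Rts.h"

    /* Clear every card of an array: a minor GC that trusts the card table
     * would then skip all of its elements. */
    static void clearAllCards (StgMutArrPtrs *a)
    {
        lnat m;
        for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) {
            *mutArrPtrsCard(a, m) = 0;
        }
    }

    /* Total closure size in words, now including the card table; this is
     * what mut_arr_ptrs_sizeW computes via the new 'size' field, assuming
     * size == ptrs + mutArrPtrsCardTableSize(ptrs). */
    static lnat totalSizeW (StgMutArrPtrs *a)
    {
        return sizeofW(StgMutArrPtrs) + a->ptrs + mutArrPtrsCardTableSize(a->ptrs);
    }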
......
@@ -136,7 +136,9 @@
 typedef struct {
     StgHeader   header;
     StgWord     ptrs;
+    StgWord     size; // ptrs plus card table
     StgClosure *payload[FLEXIBLE_ARRAY];
+    // see also: StgMutArrPtrs macros in ClosureMacros.h
 } StgMutArrPtrs;

 typedef struct {
......
@@ -132,18 +132,23 @@ stg_newAlignedPinnedByteArrayzh
 stg_newArrayzh
 {
-    W_ words, n, init, arr, p;
+    W_ words, n, init, arr, p, size;
     /* Args: R1 = words, R2 = initialisation value */

     n = R1;
     MAYBE_GC(R2_PTR,stg_newArrayzh);

-    words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
+    // the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
+    // in the array, making sure we round up, and then rounding up to a whole
+    // number of words.
+    size = n + mutArrPtrsCardWords(n);
+    words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
     ("ptr" arr) = foreign "C" allocate(MyCapability() "ptr",words) [R2];
     TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);

     SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
     StgMutArrPtrs_ptrs(arr) = n;
+    StgMutArrPtrs_size(arr) = size;

     // Initialise all elements of the array with the value in R2
     init = R2;
@@ -154,6 +159,13 @@ stg_newArrayzh
         p = p + WDS(1);
         goto for;
     }
+    // Initialise the mark bits with 0
+  for2:
+    if (p < arr + WDS(size)) {
+        W_[p] = 0;
+        p = p + WDS(1);
+        goto for2;
+    }

     RET_P(arr);
 }
@@ -165,7 +177,7 @@ stg_unsafeThawArrayzh
     // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
     // normally doesn't.  However, when we freeze a MUT_ARR_PTRS, we leave
     // it on the mutable list for the GC to remove (removing something from
-    // the mutable list is not easy, because the mut_list is only singly-linked).
+    // the mutable list is not easy).
     //
     // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
     // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
@@ -1582,9 +1594,10 @@ stg_unpackClosurezh
     }}
 out:

-    W_ ptrs_arr_sz, nptrs_arr_sz;
+    W_ ptrs_arr_sz, ptrs_arr_cards, nptrs_arr_sz;
     nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
-    ptrs_arr_sz  = SIZEOF_StgMutArrPtrs + WDS(ptrs);
+    ptrs_arr_cards = mutArrPtrsCardWords(ptrs);
+    ptrs_arr_sz  = SIZEOF_StgMutArrPtrs + WDS(ptrs) + WDS(ptrs_arr_cards);

     ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, stg_unpackClosurezh);
@@ -1596,6 +1609,8 @@ out:
     SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
     StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
+    StgMutArrPtrs_size(ptrs_arr) = ptrs + ptrs_arr_cards;
+
     p = 0;
 for:
     if(p < ptrs) {
@@ -1603,6 +1618,9 @@ for:
         p = p + 1;
         goto for;
     }
+    /* We can leave the card table uninitialised, since the array is
+       allocated in the nursery.  The GC will fill it in if/when the array
+       is promoted. */

     SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
     StgArrWords_words(nptrs_arr) = nptrs;
......
@@ -76,7 +76,8 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
     StgWeak *w;
     StgTSO *t;
     StgMutArrPtrs *arr;
-    nat n;
+    StgWord size;
+    nat n, i;

     running_finalizers = rtsTrue;
@@ -120,10 +121,12 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
     debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

-    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + n);
+    size = n + mutArrPtrsCardTableSize(n);
+    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
     TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
     SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM);
     arr->ptrs = n;
+    arr->size = size;

     n = 0;
     for (w = list; w; w = w->link) {
@@ -132,6 +135,10 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
             n++;
         }
     }
+    // set all the cards to 1
+    for (i = n; i < size; i++) {
+        arr->payload[i] = (StgClosure *)(W_)(-1);
+    }

     t = createIOThread(cap,
                        RtsFlags.GcFlags.initialStkSize,
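
The loop above stores word-sized (-1) patterns over payload[n..size), which is exactly the card-table area, so every card byte ends up non-zero (dirty). A byte-oriented way to express the same effect, purely as an illustration and not the commit's code, would be:

    /* sketch only: mark all cards of 'arr' (n elements) dirty via the
       ClosureMacros helpers rather than word stores over the payload */
    memset(mutArrPtrsCard(arr, 0), 1 /* dirty */, mutArrPtrsCards(n));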
......
@@ -111,6 +111,81 @@ scavengeTSO (StgTSO *tso)
     gct->eager_promotion = saved_eager;
 }

+/* -----------------------------------------------------------------------------
+   Mutable arrays of pointers
+   -------------------------------------------------------------------------- */
+
+static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
+{
+    lnat m;
+    rtsBool any_failed;
+    StgPtr p, q;
+
+    any_failed = rtsFalse;
+    p = (StgPtr)&a->payload[0];
+    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
+    {
+        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
+        for (; p < q; p++) {
+            evacuate((StgClosure**)p);
+        }
+        if (gct->failed_to_evac) {
+            any_failed = rtsTrue;
+            *mutArrPtrsCard(a,m) = 1;
+            gct->failed_to_evac = rtsFalse;
+        } else {
+            *mutArrPtrsCard(a,m) = 0;
+        }
+    }
+
+    q = (StgPtr)&a->payload[a->ptrs];
+    if (p < q) {
+        for (; p < q; p++) {
+            evacuate((StgClosure**)p);
+        }
+        if (gct->failed_to_evac) {
+            any_failed = rtsTrue;
+            *mutArrPtrsCard(a,m) = 1;
+            gct->failed_to_evac = rtsFalse;
+        } else {
+            *mutArrPtrsCard(a,m) = 0;
+        }
+    }
+
+    gct->failed_to_evac = any_failed;
+    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+// scavenge only the marked areas of a MUT_ARR_PTRS
+static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
+{
+    lnat m;
+    StgPtr p, q;
+    rtsBool any_failed;
+
+    any_failed = rtsFalse;
+    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
+    {
+        if (*mutArrPtrsCard(a,m) != 0) {
+            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
+            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
+                        (StgPtr)&a->payload[a->ptrs]);
+            for (; p < q; p++) {
+                evacuate((StgClosure**)p);
+            }
+            if (gct->failed_to_evac) {
+                any_failed = rtsTrue;
+                gct->failed_to_evac = rtsFalse;
+            } else {
+                *mutArrPtrsCard(a,m) = 0;
+            }
+        }
+    }
+
+    gct->failed_to_evac = any_failed;
+    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
 /* -----------------------------------------------------------------------------
    Blocks of function args occur on the stack (at the top) and
    in PAPs.
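
For reference, a standalone illustration (not part of the patch) of the payload slice that card m covers in the loops above; the last card may cover a partial group of elements.

    #include <assert.h>
    #include <stddef.h>

    #define CARD_BITS 7   /* stands in for MUT_ARR_PTRS_CARD_BITS */

    /* Element index range [lo, hi) covered by card m of an array
     * with 'ptrs' elements, mirroring scavenge_mut_arr_ptrs_marked. */
    static void card_range (size_t ptrs, size_t m, size_t *lo, size_t *hi)
    {
        *lo = m << CARD_BITS;              /* first element of card m   */
        *hi = *lo + (1u << CARD_BITS);     /* one past the last element */
        if (*hi > ptrs) *hi = ptrs;        /* final card may be partial */
    }

    int main (void)
    {
        size_t lo, hi;
        card_range(300, 2, &lo, &hi);      /* 300 elements -> cards 0, 1, 2 */
        assert(lo == 256 && hi == 300);    /* card 2 covers a 44-element tail */
        return 0;
    }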
@@ -554,20 +629,14 @@ scavenge_block (bdescr *bd)
     case MUT_ARR_PTRS_CLEAN:
     case MUT_ARR_PTRS_DIRTY:
-        // follow everything
     {
-        StgPtr next;
-
         // We don't eagerly promote objects pointed to by a mutable
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
         gct->eager_promotion = rtsFalse;
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
-        gct->eager_promotion = saved_eager_promotion;
+
+        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

         if (gct->failed_to_evac) {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
@@ -575,6 +644,7 @@ scavenge_block (bdescr *bd)
         } else {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
         }
+        gct->eager_promotion = saved_eager_promotion;

         gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
         break;
     }
@@ -583,17 +653,12 @@
     case MUT_ARR_PTRS_FROZEN0:
         // follow everything
     {
-        StgPtr next;
-
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
+        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

         // If we're going to put this object on the mutable list, then
         // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
         if (gct->failed_to_evac) {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
         } else {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
         }
@@ -922,7 +987,6 @@ scavenge_mark_stack(void)
     case MUT_ARR_PTRS_DIRTY:
         // follow everything
     {
-        StgPtr next;
         rtsBool saved_eager;

         // We don't eagerly promote objects pointed to by a mutable
@@ -931,18 +995,16 @@
         // avoid traversing it during minor GCs.
         saved_eager = gct->eager_promotion;
         gct->eager_promotion = rtsFalse;
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
-        gct->eager_promotion = saved_eager;
+
+        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

         if (gct->failed_to_evac) {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
         } else {
             ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
         }
+        gct->eager_promotion = saved_eager;

         gct->failed_to_evac = rtsTrue; // mutable anyhow.
         break;
     }
@@ -951,12 +1013,9 @@
     case MUT_ARR_PTRS_FROZEN0:
         // follow everything
     {
-        StgPtr next, q = p;
-
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
+        StgPtr q = p;
+
+        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

         // If we're going to put this object on the mutable list, then
         // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
@@ -1196,7 +1255,6 @@ scavenge_one(StgPtr p)
     case MUT_ARR_PTRS_CLEAN:
     case MUT_ARR_PTRS_DIRTY:
     {
-        StgPtr next, q;
         rtsBool saved_eager;

         // We don't eagerly promote objects pointed to by a mutable
@@ -1205,19 +1263,16 @@
         // avoid traversing it during minor GCs.
         saved_eager = gct->eager_promotion;
         gct->eager_promotion = rtsFalse;
-        q = p;
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
-        gct->eager_promotion = saved_eager;
+
+        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

         if (gct->failed_to_evac) {
-            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
         } else {
-            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
         }
+        gct->eager_promotion = saved_eager;

         gct->failed_to_evac = rtsTrue;
         break;
     }
@@ -1226,19 +1281,14 @@
     case MUT_ARR_PTRS_FROZEN0:
     {
         // follow everything
-        StgPtr next, q=p;
-
-        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-            evacuate((StgClosure **)p);
-        }
+        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

         // If we're going to put this object on the mutable list, then
         // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
         if (gct->failed_to_evac) {
-            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
         } else {
-            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
+            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
         }
         break;
     }
@@ -1412,13 +1462,32 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
             // definitely doesn't point into a young generation.
             // Clean objects don't need to be scavenged.  Some clean
             // objects (MUT_VAR_CLEAN) are not kept on the mutable
-            // list at all; others, such as MUT_ARR_PTRS_CLEAN
+            // list at all; others, such as TSO
             // are always on the mutable list.
             //
             switch (get_itbl((StgClosure *)p)->type) {
             case MUT_ARR_PTRS_CLEAN:
                 recordMutableGen_GC((StgClosure *)p,gen->no);
                 continue;
+            case MUT_ARR_PTRS_DIRTY:
+            {
+                rtsBool saved_eager;
+                saved_eager = gct->eager_promotion;
+                gct->eager_promotion = rtsFalse;