Commit 6d784c43 authored by Simon Marlow

Add a write barrier for TVAR closures

This improves GC performance when there are a lot of TVars in the
heap.  For instance, a TChan with a lot of elements causes a massive
GC drag without this patch.

There's more to do: several other STM closure types don't have write
barriers, so GC performance when there are a lot of threads blocked on
STM isn't great.  But fixing the problem for TVar is a good start.
parent 97dc57c6
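
As a concrete illustration of the workload described above (not part of the patch, and with arbitrary element counts), a program along the following lines shows the effect. A TChan is essentially a linked list of TVars, so a channel holding a million unread elements keeps roughly a million TVars live; the clean/dirty scheme introduced below keeps a TVar on a capability's mutable list only while it is dirty, instead of treating every TVar as permanently mutable, so far fewer TVars have to be revisited at each GC. Running with +RTS -s before and after the patch makes the difference in GC time visible.

-- Illustration only; not taken from the patch.
import Control.Concurrent.STM
import Control.Monad (forM_)

main :: IO ()
main = do
  chan <- newTChanIO
  -- Fill the channel: every unread element is another live TVar cell.
  forM_ [1 .. 1000000 :: Int] $ \i -> atomically (writeTChan chan i)
  -- Drain it again while the bulk of the channel is still reachable,
  -- so several further GCs run over the long chain of TVars.
  forM_ [1 .. 1000000 :: Int] $ \_ -> atomically (readTChan chan)
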
......@@ -61,23 +61,24 @@
#define BLACKHOLE 40
#define MVAR_CLEAN 41
#define MVAR_DIRTY 42
#define ARR_WORDS 43
#define MUT_ARR_PTRS_CLEAN 44
#define MUT_ARR_PTRS_DIRTY 45
#define MUT_ARR_PTRS_FROZEN0 46
#define MUT_ARR_PTRS_FROZEN 47
#define MUT_VAR_CLEAN 48
#define MUT_VAR_DIRTY 49
#define WEAK 50
#define PRIM 51
#define MUT_PRIM 52
#define TSO 53
#define STACK 54
#define TREC_CHUNK 55
#define ATOMICALLY_FRAME 56
#define CATCH_RETRY_FRAME 57
#define CATCH_STM_FRAME 58
#define WHITEHOLE 59
#define N_CLOSURE_TYPES 60
#define TVAR 43
#define ARR_WORDS 44
#define MUT_ARR_PTRS_CLEAN 45
#define MUT_ARR_PTRS_DIRTY 46
#define MUT_ARR_PTRS_FROZEN0 47
#define MUT_ARR_PTRS_FROZEN 48
#define MUT_VAR_CLEAN 49
#define MUT_VAR_DIRTY 50
#define WEAK 51
#define PRIM 52
#define MUT_PRIM 53
#define TSO 54
#define STACK 55
#define TREC_CHUNK 56
#define ATOMICALLY_FRAME 57
#define CATCH_RETRY_FRAME 58
#define CATCH_STM_FRAME 59
#define WHITEHOLE 60
#define N_CLOSURE_TYPES 61
#endif /* RTS_STORAGE_CLOSURETYPES_H */
......@@ -101,6 +101,8 @@ RTS_ENTRY(stg_DEAD_WEAK);
RTS_ENTRY(stg_STABLE_NAME);
RTS_ENTRY(stg_MVAR_CLEAN);
RTS_ENTRY(stg_MVAR_DIRTY);
RTS_ENTRY(stg_TVAR_CLEAN);
RTS_ENTRY(stg_TVAR_DIRTY);
RTS_ENTRY(stg_TSO);
RTS_ENTRY(stg_STACK);
RTS_ENTRY(stg_ARR_WORDS);
......@@ -130,7 +132,6 @@ RTS_ENTRY(stg_atomically);
RTS_ENTRY(stg_TVAR_WATCH_QUEUE);
RTS_ENTRY(stg_INVARIANT_CHECK_QUEUE);
RTS_ENTRY(stg_ATOMIC_INVARIANT);
RTS_ENTRY(stg_TVAR);
RTS_ENTRY(stg_TREC_CHUNK);
RTS_ENTRY(stg_TREC_HEADER);
RTS_ENTRY(stg_END_STM_WATCH_QUEUE);
......
......@@ -64,7 +64,8 @@ StgWord16 closure_flags[] = {
[BLOCKING_QUEUE] = ( _NS| _MUT|_UPT ),
[MVAR_CLEAN] = (_HNF| _NS| _MUT|_UPT ),
[MVAR_DIRTY] = (_HNF| _NS| _MUT|_UPT ),
[ARR_WORDS] = (_HNF| _NS| _UPT ),
[TVAR] = (_HNF| _NS| _MUT|_UPT ),
[ARR_WORDS] = (_HNF| _NS| _UPT ),
[MUT_ARR_PTRS_CLEAN] = (_HNF| _NS| _MUT|_UPT ),
[MUT_ARR_PTRS_DIRTY] = (_HNF| _NS| _MUT|_UPT ),
[MUT_ARR_PTRS_FROZEN0] = (_HNF| _NS| _MUT|_UPT ),
......@@ -83,6 +84,6 @@ StgWord16 closure_flags[] = {
[WHITEHOLE] = ( 0 )
};
#if N_CLOSURE_TYPES != 60
#if N_CLOSURE_TYPES != 61
#error Closure types changed: update ClosureFlags.c!
#endif
......@@ -475,7 +475,7 @@ retry_pop_stack:
W_ trec, outer;
W_ r;
trec = StgTSO_trec(CurrentTSO);
(r) = ccall stmValidateNestOfTransactions(trec "ptr");
(r) = ccall stmValidateNestOfTransactions(MyCapability() "ptr", trec "ptr");
outer = StgTRecHeader_enclosing_trec(trec);
ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
......
......@@ -63,6 +63,7 @@ processHeapClosureForDead( StgClosure *c )
case STACK:
case MVAR_CLEAN:
case MVAR_DIRTY:
case TVAR:
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
......
......@@ -1085,6 +1085,7 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(stg_deRefWeakzh) \
SymI_HasProto(stg_deRefStablePtrzh) \
SymI_HasProto(dirty_MUT_VAR) \
SymI_HasProto(dirty_TVAR) \
SymI_HasProto(stg_forkzh) \
SymI_HasProto(stg_forkOnzh) \
SymI_HasProto(forkProcess) \
......@@ -1219,6 +1220,8 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(startTimer) \
SymI_HasProto(stg_MVAR_CLEAN_info) \
SymI_HasProto(stg_MVAR_DIRTY_info) \
SymI_HasProto(stg_TVAR_CLEAN_info) \
SymI_HasProto(stg_TVAR_DIRTY_info) \
SymI_HasProto(stg_IND_STATIC_info) \
SymI_HasProto(stg_ARR_WORDS_info) \
SymI_HasProto(stg_MUT_ARR_PTRS_DIRTY_info) \
......
......@@ -1065,7 +1065,7 @@ stg_newTVarzh (P_ init)
ALLOC_PRIM_P (SIZEOF_StgTVar, stg_newTVarzh, init);
tv = Hp - SIZEOF_StgTVar + WDS(1);
SET_HDR (tv, stg_TVAR_info, CCCS);
SET_HDR (tv, stg_TVAR_DIRTY_info, CCCS);
StgTVar_current_value(tv) = init;
StgTVar_first_watch_queue_entry(tv) = stg_END_STM_WATCH_QUEUE_closure;
......
......@@ -162,6 +162,12 @@ printClosure( StgClosure *obj )
printStdObjPayload(obj);
break;
case MUT_PRIM:
debugBelch("MUT_PRIM(");
printPtr((StgPtr)obj->header.info);
printStdObjPayload(obj);
break;
case THUNK:
case THUNK_1_0: case THUNK_0_1:
case THUNK_1_1: case THUNK_0_2: case THUNK_2_0:
......@@ -324,6 +330,13 @@ printClosure( StgClosure *obj )
break;
}
case TVAR:
{
StgTVar* tv = (StgTVar*)obj;
debugBelch("TVAR(value=%p, wq=%p, num_updates=%" FMT_Word ")\n", tv->current_value, tv->first_watch_queue_entry, tv->num_updates);
break;
}
case MUT_VAR_CLEAN:
{
StgMutVar* mv = (StgMutVar*)obj;
......@@ -1089,6 +1102,7 @@ char *closure_type_names[] = {
[BLACKHOLE] = "BLACKHOLE",
[MVAR_CLEAN] = "MVAR_CLEAN",
[MVAR_DIRTY] = "MVAR_DIRTY",
[TVAR] = "TVAR",
[ARR_WORDS] = "ARR_WORDS",
[MUT_ARR_PTRS_CLEAN] = "MUT_ARR_PTRS_CLEAN",
[MUT_ARR_PTRS_DIRTY] = "MUT_ARR_PTRS_DIRTY",
......
......@@ -988,7 +988,8 @@ heapCensusChain( Census *census, bdescr *bd )
case MVAR_CLEAN:
case MVAR_DIRTY:
case WEAK:
case TVAR:
case WEAK:
case PRIM:
case MUT_PRIM:
case MUT_VAR_CLEAN:
......
......@@ -505,6 +505,7 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
break;
// layout.payload.ptrs, no SRT
case TVAR:
case CONSTR:
case PRIM:
case MUT_PRIM:
......@@ -844,7 +845,8 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
return;
}
case CONSTR:
case TVAR:
case CONSTR:
case PRIM:
case MUT_PRIM:
case BCO:
......@@ -1009,6 +1011,7 @@ isRetainer( StgClosure *c )
case MUT_PRIM:
case MVAR_CLEAN:
case MVAR_DIRTY:
case TVAR:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
......
......@@ -91,6 +91,7 @@
#include "STM.h"
#include "Trace.h"
#include "Threads.h"
#include "sm/Storage.h"
#include <stdio.h>
......@@ -195,13 +196,15 @@ static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
return result;
}
static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED,
static void unlock_tvar(Capability *cap,
StgTRecHeader *trec STG_UNUSED,
StgTVar *s,
StgClosure *c,
StgBool force_update) {
TRACE("%p : unlock_tvar(%p)", trec, s);
if (force_update) {
s -> current_value = c;
dirty_TVAR(cap,s);
}
}
......@@ -252,14 +255,16 @@ static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
return result;
}
static void *unlock_tvar(StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED,
static void *unlock_tvar(Capability *cap,
StgTRecHeader *trec STG_UNUSED,
StgTVar *s,
StgClosure *c,
StgBool force_update) {
TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
ASSERT (smp_locked == trec);
if (force_update) {
s -> current_value = c;
dirty_TVAR(cap,s);
}
}
......@@ -311,13 +316,15 @@ static StgClosure *lock_tvar(StgTRecHeader *trec,
return result;
}
static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
static void unlock_tvar(Capability *cap,
StgTRecHeader *trec STG_UNUSED,
StgTVar *s,
StgClosure *c,
StgBool force_update STG_UNUSED) {
TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
ASSERT(s -> current_value == (StgClosure *)trec);
s -> current_value = c;
dirty_TVAR(cap,s);
}
static StgBool cond_lock_tvar(StgTRecHeader *trec,
......@@ -585,6 +592,7 @@ static void build_watch_queue_entries_for_trec(Capability *cap,
}
s -> first_watch_queue_entry = q;
e -> new_value = (StgClosure *) q;
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
});
}
......@@ -621,9 +629,10 @@ static void remove_watch_queue_entries_for_trec(Capability *cap,
} else {
ASSERT (s -> first_watch_queue_entry == q);
s -> first_watch_queue_entry = nq;
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
}
free_stg_tvar_watch_queue(cap, q);
unlock_tvar(trec, s, saw, FALSE);
unlock_tvar(cap, trec, s, saw, FALSE);
});
}
......@@ -758,7 +767,8 @@ static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
// the TVars involved. "revert_all" is not set in commit operations
// where we don't lock TVars that have been read from but not updated.
static void revert_ownership(StgTRecHeader *trec STG_UNUSED,
static void revert_ownership(Capability *cap STG_UNUSED,
StgTRecHeader *trec STG_UNUSED,
StgBool revert_all STG_UNUSED) {
#if defined(STM_FG_LOCKS)
FOR_EACH_ENTRY(trec, e, {
......@@ -766,7 +776,7 @@ static void revert_ownership(StgTRecHeader *trec STG_UNUSED,
StgTVar *s;
s = e -> tvar;
if (tvar_is_locked(s, trec)) {
unlock_tvar(trec, s, e -> expected_value, TRUE);
unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
}
}
});
......@@ -788,7 +798,8 @@ static void revert_ownership(StgTRecHeader *trec STG_UNUSED,
// to ensure that an atomic snapshot of all of these locations has been
// seen.
static StgBool validate_and_acquire_ownership (StgTRecHeader *trec,
static StgBool validate_and_acquire_ownership (Capability *cap,
StgTRecHeader *trec,
int acquire_all,
int retain_ownership) {
StgBool result;
......@@ -836,7 +847,7 @@ static StgBool validate_and_acquire_ownership (StgTRecHeader *trec,
}
if ((!result) || (!retain_ownership)) {
revert_ownership(trec, acquire_all);
revert_ownership(cap, trec, acquire_all);
}
return result;
......@@ -1020,7 +1031,7 @@ void stmCondemnTransaction(Capability *cap,
/*......................................................................*/
StgBool stmValidateNestOfTransactions(StgTRecHeader *trec) {
StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
StgTRecHeader *t;
StgBool result;
......@@ -1035,7 +1046,7 @@ StgBool stmValidateNestOfTransactions(StgTRecHeader *trec) {
t = trec;
result = TRUE;
while (t != NO_TREC) {
result &= validate_and_acquire_ownership(t, TRUE, FALSE);
result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
t = t -> enclosing_trec;
}
......@@ -1107,7 +1118,8 @@ static void disconnect_invariant(Capability *cap,
} else {
ASSERT (s -> first_watch_queue_entry == q);
s -> first_watch_queue_entry = nq;
}
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
}
TRACE(" found it in watch queue entry %p", q);
free_stg_tvar_watch_queue(cap, q);
DEBUG_ONLY( found = TRUE );
......@@ -1147,6 +1159,7 @@ static void connect_invariant_to_trec(Capability *cap,
fq -> prev_queue_entry = q;
}
s -> first_watch_queue_entry = q;
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
});
inv -> last_execution = my_execution;
......@@ -1248,7 +1261,7 @@ StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *
}
}
unlock_tvar(trec, s, old, FALSE);
unlock_tvar(cap, trec, s, old, FALSE);
}
}
c = c -> prev_chunk;
......@@ -1337,7 +1350,7 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
use_read_phase = ((config_use_read_phase) && (!touched_invariants));
result = validate_and_acquire_ownership(trec, (!use_read_phase), TRUE);
result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
if (result) {
// We now know that all the updated locations hold their expected values.
ASSERT (trec -> state == TREC_ACTIVE);
......@@ -1397,12 +1410,12 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
IF_STM_FG_LOCKS({
s -> num_updates ++;
});
unlock_tvar(trec, s, e -> new_value, TRUE);
unlock_tvar(cap, trec, s, e -> new_value, TRUE);
}
ACQ_ASSERT(!tvar_is_locked(s, trec));
});
} else {
revert_ownership(trec, FALSE);
revert_ownership(cap, trec, FALSE);
}
}
......@@ -1427,7 +1440,7 @@ StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
lock_stm(trec);
et = trec -> enclosing_trec;
result = validate_and_acquire_ownership(trec, (!config_use_read_phase), TRUE);
result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
if (result) {
// We now know that all the updated locations hold their expected values.
......@@ -1448,13 +1461,13 @@ StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
StgTVar *s;
s = e -> tvar;
if (entry_is_update(e)) {
unlock_tvar(trec, s, e -> expected_value, FALSE);
unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
}
merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
});
} else {
revert_ownership(trec, FALSE);
revert_ownership(cap, trec, FALSE);
}
}
......@@ -1478,7 +1491,7 @@ StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
(trec -> state == TREC_CONDEMNED));
lock_stm(trec);
result = validate_and_acquire_ownership(trec, TRUE, TRUE);
result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
if (result) {
// The transaction is valid so far so we can actually start waiting.
// (Otherwise the transaction was not valid and the thread will have to
......@@ -1510,8 +1523,8 @@ StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
void
stmWaitUnlock(Capability *cap STG_UNUSED, StgTRecHeader *trec) {
revert_ownership(trec, TRUE);
stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
revert_ownership(cap, trec, TRUE);
unlock_stm(trec);
}
......@@ -1528,14 +1541,14 @@ StgBool stmReWait(Capability *cap, StgTSO *tso) {
(trec -> state == TREC_CONDEMNED));
lock_stm(trec);
result = validate_and_acquire_ownership(trec, TRUE, TRUE);
result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
if (result) {
// The transaction remains valid -- do nothing because it is already on
// the wait queues
ASSERT (trec -> state == TREC_WAITING);
park_tso(tso);
revert_ownership(trec, TRUE);
revert_ownership(cap, trec, TRUE);
} else {
// The transcation has become invalid. We can now remove it from the wait
// queues.
......
......@@ -97,7 +97,7 @@ void stmCondemnTransaction(Capability *cap, StgTRecHeader *trec);
threads at GC (in case they are stuck looping)
*/
StgBool stmValidateNestOfTransactions(StgTRecHeader *trec);
StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec);
/*----------------------------------------------------------------------
......
......@@ -1056,7 +1056,7 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
// and a is never equal to b given a consistent view of memory.
//
if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
if (!stmValidateNestOfTransactions (t -> trec)) {
if (!stmValidateNestOfTransactions(cap, t -> trec)) {
debugTrace(DEBUG_sched | DEBUG_stm,
"trec %p found wasting its time", t);
......
......@@ -474,8 +474,11 @@ INFO_TABLE(stg_MVAR_DIRTY,3,0,MVAR_DIRTY,"MVAR","MVAR")
STM
-------------------------------------------------------------------------- */
INFO_TABLE(stg_TVAR, 2, 1, MUT_PRIM, "TVAR", "TVAR")
{ foreign "C" barf("TVAR object entered!") never returns; }
INFO_TABLE(stg_TVAR_CLEAN, 2, 1, TVAR, "TVAR", "TVAR")
{ foreign "C" barf("TVAR_CLEAN object entered!") never returns; }
INFO_TABLE(stg_TVAR_DIRTY, 2, 1, TVAR, "TVAR", "TVAR")
{ foreign "C" barf("TVAR_DIRTY object entered!") never returns; }
INFO_TABLE(stg_TVAR_WATCH_QUEUE, 3, 0, MUT_PRIM, "TVAR_WATCH_QUEUE", "TVAR_WATCH_QUEUE")
{ foreign "C" barf("TVAR_WATCH_QUEUE object entered!") never returns; }
......
......@@ -603,6 +603,7 @@ thread_obj (StgInfoTable *info, StgPtr p)
case MUT_PRIM:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case TVAR:
case BLACKHOLE:
case BLOCKING_QUEUE:
{
......
......@@ -540,13 +540,6 @@ loop:
case WHITEHOLE:
goto loop;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
return;
// For ints and chars of low value, save space by replacing references to
// these with closures with references to common, shared ones in the RTS.
//
......@@ -646,6 +639,11 @@ loop:
goto loop;
}
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
case TVAR:
case BLOCKING_QUEUE:
case WEAK:
case PRIM:
......
......@@ -109,6 +109,12 @@ static W_ g0_pcnt_kept = 30; // percentage of g0 live at last minor GC
nat mutlist_MUTVARS,
mutlist_MUTARRS,
mutlist_MVARS,
mutlist_TVAR,
mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK,
mutlist_TREC_HEADER,
mutlist_ATOMIC_INVARIANT,
mutlist_INVARIANT_CHECK_QUEUE,
mutlist_OTHERS;
#endif
......@@ -218,6 +224,13 @@ GarbageCollect (nat collect_gen,
#ifdef DEBUG
mutlist_MUTVARS = 0;
mutlist_MUTARRS = 0;
mutlist_MVARS = 0;
mutlist_TVAR = 0;
mutlist_TVAR_WATCH_QUEUE = 0;
mutlist_TREC_CHUNK = 0;
mutlist_TREC_HEADER = 0;
mutlist_ATOMIC_INVARIANT = 0;
mutlist_INVARIANT_CHECK_QUEUE = 0;
mutlist_OTHERS = 0;
#endif
......@@ -499,9 +512,14 @@ GarbageCollect (nat collect_gen,
copied += mut_list_size;
debugTrace(DEBUG_gc,
"mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
"mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d TVARs, %d TVAR_WATCH_QUEUEs, %d TREC_CHUNKs, %d TREC_HEADERs, %d ATOMIC_INVARIANTs, %d INVARIANT_CHECK_QUEUEs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS,
mutlist_TVAR, mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK, mutlist_TREC_HEADER,
mutlist_ATOMIC_INVARIANT,
mutlist_INVARIANT_CHECK_QUEUE,
mutlist_OTHERS);
}
bdescr *next, *prev;
......
......@@ -37,7 +37,13 @@ extern long copied;
extern rtsBool work_stealing;
#ifdef DEBUG
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
mutlist_TVAR,
mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK,
mutlist_TREC_HEADER,
mutlist_ATOMIC_INVARIANT,
mutlist_INVARIANT_CHECK_QUEUE;
#endif
#if defined(PROF_SPIN) && defined(THREADED_RTS)
......
......@@ -282,6 +282,7 @@ checkClosure( StgClosure* p )
case MUT_PRIM:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case TVAR:
case CONSTR_STATIC:
case CONSTR_NOCAF_STATIC:
case THUNK_STATIC:
......
......@@ -424,6 +424,23 @@ scavenge_block (bdescr *bd)
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
tvar->header.info = &stg_TVAR_DIRTY_info;
} else {
tvar->header.info = &stg_TVAR_CLEAN_info;
}
p += sizeofW(StgTVar);
break;
}
case FUN_2_0:
scavenge_fun_srt(info);
evacuate(&((StgClosure *)p)->payload[1]);
......@@ -783,6 +800,22 @@ scavenge_mark_stack(void)
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
tvar->header.info = &stg_TVAR_DIRTY_info;
} else {
tvar->header.info = &stg_TVAR_CLEAN_info;
}
break;
}
case FUN_2_0:
scavenge_fun_srt(info);
evacuate(&((StgClosure *)p)->payload[1]);
......@@ -1088,6 +1121,22 @@ scavenge_one(StgPtr p)
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
tvar->header.info = &stg_TVAR_DIRTY_info;
} else {
tvar->header.info = &stg_TVAR_CLEAN_info;
}
break;
}
case THUNK:
case THUNK_1_0:
case THUNK_0_1:
......@@ -1363,10 +1412,26 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
case MVAR_CLEAN:
barf("MVAR_CLEAN on mutable list");
case MVAR_DIRTY:
mutlist_MVARS++; break;
default:
mutlist_OTHERS++; break;
}
mutlist_MVARS++; break;
case TVAR:
mutlist_TVAR++; break;
case TREC_CHUNK:
mutlist_TREC_CHUNK++; break;
case MUT_PRIM:
if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
mutlist_TVAR_WATCH_QUEUE++;
else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
mutlist_TREC_HEADER++;
else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
mutlist_ATOMIC_INVARIANT++;
else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
mutlist_INVARIANT_CHECK_QUEUE++;
else
mutlist_OTHERS++;
break;
default:
mutlist_OTHERS++; break;
}
#endif
// Check whether this object is "clean", that is it
......
......@@ -845,6 +845,15 @@ dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
}
}
void
dirty_TVAR(Capability *cap, StgTVar *p)
{
if (p->header.info == &stg_TVAR_CLEAN_info) {
p->header.info = &stg_TVAR_DIRTY_info;
recordClosureMutated(cap,(StgClosure*)p);
}
}
// Setting a TSO's link field with a write barrier.
// It is *not* necessary to call this function when
// * setting the link field to END_TSO_QUEUE
......
......@@ -69,10 +69,11 @@ extern Mutex sm_mutex;
#endif
/* -----------------------------------------------------------------------------
The write barrier for MVARs
The write barrier for MVARs and TVARs
-------------------------------------------------------------------------- */
void dirty_MVAR(StgRegTable *reg, StgClosure *p);
void dirty_TVAR(Capability *cap, StgTVar *p);
/* -----------------------------------------------------------------------------
Nursery manipulation
......