Commit a122d4fd authored by Ben Gamari, committed by Ben Gamari

rts: Rip out support for STM invariants

This feature has some very serious correctness issues (#14310),
introduces a great deal of complexity, and hasn't seen wide usage.
Consequently we are removing it, as proposed in Proposal #77 [1]. This
is heavily based on a patch from fryguybob.

Updates stm submodule.

[1] https://github.com/ghc-proposals/ghc-proposals/pull/77

Test Plan: Validate

Reviewers: erikd, simonmar, hvr

Reviewed By: simonmar

Subscribers: rwbarton, thomie, carter

GHC Trac Issues: #14310

Differential Revision: https://phabricator.haskell.org/D4760
parent 9ea45963
......@@ -2412,13 +2412,6 @@ primop CatchSTMOp "catchSTM#" GenPrimOp
out_of_line = True
has_side_effects = True
primop Check "check#" GenPrimOp
(State# RealWorld -> (# State# RealWorld, a #) )
-> (State# RealWorld -> State# RealWorld)
with
out_of_line = True
has_side_effects = True
primop NewTVarOp "newTVar#" GenPrimOp
a
-> State# s -> (# State# s, TVar# s a #)
......
......@@ -135,6 +135,11 @@ Runtime system
- The runtime now allows use of the :rts-flag:`-hT` profiling variety on
programs built with :ghc-flag:`-prof`.
- The STM assertions mechanism (namely the ``always`` and ``alwaysSucceeds``
functions) has been removed. This happened a bit earlier than proposed in the
deprecation pragma included in GHC 8.4, but due to community feedback we
decided to move ahead with the early removal.
Template Haskell
~~~~~~~~~~~~~~~~
......
......@@ -799,7 +799,6 @@
#define NO_TREC stg_NO_TREC_closure
#define END_TSO_QUEUE stg_END_TSO_QUEUE_closure
#define STM_AWOKEN stg_STM_AWOKEN_closure
#define END_INVARIANT_CHECK_QUEUE stg_END_INVARIANT_CHECK_QUEUE_closure
#define recordMutableCap(p, gen) \
W_ __bd; \
......
......@@ -308,7 +308,7 @@ typedef struct StgTRecHeader_ StgTRecHeader;
typedef struct StgTVarWatchQueue_ {
StgHeader header;
StgClosure *closure; // StgTSO or StgAtomicInvariant
StgClosure *closure; // StgTSO
struct StgTVarWatchQueue_ *next_queue_entry;
struct StgTVarWatchQueue_ *prev_queue_entry;
} StgTVarWatchQueue;
......@@ -320,13 +320,6 @@ typedef struct {
StgInt volatile num_updates;
} StgTVar;
typedef struct {
StgHeader header;
StgClosure *code;
StgTRecHeader *last_execution;
StgWord lock;
} StgAtomicInvariant;
/* new_value == expected_value for read-only accesses */
/* new_value is a StgTVarWatchQueue entry when trec in state TREC_WAITING */
typedef struct {
......@@ -355,25 +348,16 @@ typedef enum {
TREC_WAITING, /* Transaction currently waiting */
} TRecState;
typedef struct StgInvariantCheckQueue_ {
StgHeader header;
StgAtomicInvariant *invariant;
StgTRecHeader *my_execution;
struct StgInvariantCheckQueue_ *next_queue_entry;
} StgInvariantCheckQueue;
struct StgTRecHeader_ {
StgHeader header;
struct StgTRecHeader_ *enclosing_trec;
StgTRecChunk *current_chunk;
StgInvariantCheckQueue *invariants_to_check;
TRecState state;
};
typedef struct {
StgHeader header;
StgClosure *code;
StgTVarWatchQueue *next_invariant_to_check;
StgClosure *result;
} StgAtomicallyFrame;
......
......@@ -143,12 +143,9 @@ RTS_ENTRY(stg_raise);
RTS_ENTRY(stg_raise_ret);
RTS_ENTRY(stg_atomically);
RTS_ENTRY(stg_TVAR_WATCH_QUEUE);
RTS_ENTRY(stg_INVARIANT_CHECK_QUEUE);
RTS_ENTRY(stg_ATOMIC_INVARIANT);
RTS_ENTRY(stg_TREC_CHUNK);
RTS_ENTRY(stg_TREC_HEADER);
RTS_ENTRY(stg_END_STM_WATCH_QUEUE);
RTS_ENTRY(stg_END_INVARIANT_CHECK_QUEUE);
RTS_ENTRY(stg_END_STM_CHUNK_LIST);
RTS_ENTRY(stg_NO_TREC);
RTS_ENTRY(stg_COMPACT_NFDATA_CLEAN);
......@@ -179,7 +176,6 @@ RTS_CLOSURE(stg_dummy_ret_closure);
RTS_CLOSURE(stg_forceIO_closure);
RTS_CLOSURE(stg_END_STM_WATCH_QUEUE_closure);
RTS_CLOSURE(stg_END_INVARIANT_CHECK_QUEUE_closure);
RTS_CLOSURE(stg_END_STM_CHUNK_LIST_closure);
RTS_CLOSURE(stg_NO_TREC_closure);
......@@ -471,7 +467,6 @@ RTS_FUN_DECL(stg_newTVarzh);
RTS_FUN_DECL(stg_readTVarzh);
RTS_FUN_DECL(stg_readTVarIOzh);
RTS_FUN_DECL(stg_writeTVarzh);
RTS_FUN_DECL(stg_checkzh);
RTS_FUN_DECL(stg_unpackClosurezh);
RTS_FUN_DECL(stg_getApStackValzh);
......
......@@ -74,8 +74,6 @@ module GHC.Conc
, orElse
, throwSTM
, catchSTM
, alwaysSucceeds
, always
, TVar(..)
, newTVar
, newTVarIO
......
......@@ -74,8 +74,6 @@ module GHC.Conc.Sync
, orElse
, throwSTM
, catchSTM
, alwaysSucceeds
, always
, TVar(..)
, newTVar
, newTVarIO
......@@ -777,43 +775,6 @@ catchSTM (STM m) handler = STM $ catchSTM# m handler'
Just e' -> unSTM (handler e')
Nothing -> raiseIO# e
-- Invariant checking has been removed. See #14324 and
-- https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0011-deprecate-stm-invariants.rst
{-# DEPRECATED checkInv, always, alwaysSucceeds
[ "The STM invariant-checking mechanism is deprecated in GHC 8.4"
, "and will be removed in GHC 8.10. See "
, "<https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0011-deprecate-stm-invariants.rst>."
, ""
, "Existing users are encouraged to encapsulate their STM"
, "operations in safe abstractions which can perform the invariant"
, "checking without help from the runtime system."
] #-}
-- | Low-level primitive on which 'always' and 'alwaysSucceeds' are built.
-- 'checkInv' differs from these in that,
--
-- 1. the invariant is not checked when 'checkInv' is called, only at the end of
-- this and subsequent transactions
-- 2. the invariant failure is indicated by raising an exception.
checkInv :: STM a -> STM ()
checkInv (STM m) = STM (\s -> case (check# m) s of s' -> (# s', () #))
-- | 'alwaysSucceeds' adds a new invariant that must be true when passed
-- to 'alwaysSucceeds', at the end of the current transaction, and at
-- the end of every subsequent transaction. If it fails at any
-- of those points then the transaction violating it is aborted
-- and the exception raised by the invariant is propagated.
alwaysSucceeds :: STM a -> STM ()
alwaysSucceeds i = do ( i >> retry ) `orElse` ( return () )
checkInv i
-- | 'always' is a variant of 'alwaysSucceeds' in which the invariant is
-- expressed as an @STM Bool@ action that must return @True@. Returning
-- @False@ or raising an exception are both treated as invariant failures.
always :: STM Bool -> STM ()
always i = alwaysSucceeds ( do v <- i
if (v) then return () else ( errorWithoutStackTrace "Transactional invariant violation" ) )
-- |Shared memory locations that support atomic memory transactions.
data TVar a = TVar (TVar# RealWorld a)
......
......@@ -3,6 +3,17 @@
## 4.12.0.0 *TBA*
* Bundled with GHC *TBA*
* The STM invariant-checking mechanism (`always` and `alwaysSucceeds`), which
was deprecated in GHC 8.4, has been removed (as proposed in
<https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0011-deprecate-stm-invariants.rst>).
This is a bit earlier than proposed in the deprecation pragma included in
GHC 8.4, but due to community feedback we decided to move ahead with the
early removal.
Existing users are encouraged to encapsulate their STM operations in safe
abstractions which can perform the invariant checking without help from the
runtime system.
* Add a new module `GHC.ResponseFile` (previously defined in the `haddock`
package). (#13896)
......
Subproject commit 33a36c33de150f562a98803e2fc332f07bb29457
Subproject commit 8c4d0fabb15ad00beb1e15d027825c78b2c39881
......@@ -298,7 +298,6 @@ initCapability (Capability *cap, uint32_t i)
cap->weak_ptr_list_hd = NULL;
cap->weak_ptr_list_tl = NULL;
cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
cap->free_trec_chunks = END_STM_CHUNK_LIST;
cap->free_trec_headers = NO_TREC;
cap->transaction_tokens = 0;
......
......@@ -154,7 +154,6 @@ struct Capability_ {
// Per-capability STM-related data
StgTVarWatchQueue *free_tvar_watch_queues;
StgInvariantCheckQueue *free_invariant_check_queues;
StgTRecChunk *free_trec_chunks;
StgTRecHeader *free_trec_headers;
uint32_t transaction_tokens;
......
......@@ -489,11 +489,6 @@ retry_pop_stack:
ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
if (outer != NO_TREC) {
ccall stmAbortTransaction(MyCapability() "ptr", outer "ptr");
ccall stmFreeAbortedTRec(MyCapability() "ptr", outer "ptr");
}
StgTSO_trec(CurrentTSO) = NO_TREC;
if (r != 0) {
// Transaction was valid: continue searching for a catch frame
......
......@@ -1057,11 +1057,10 @@ INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
// Atomically frame ------------------------------------------------------------
// This must match StgAtomicallyFrame in Closures.h
#define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,next,result) \
#define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,result) \
w_ info_ptr, \
PROF_HDR_FIELDS(w_,p1,p2) \
p_ code, \
p_ next, \
p_ result
......@@ -1070,67 +1069,36 @@ INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
ATOMICALLY_FRAME_FIELDS(W_,P_,
info_ptr, p1, p2,
code,
next_invariant,
frame_result))
return (P_ result) // value returned to the frame
{
W_ valid;
gcptr trec, outer, next_invariant, q;
gcptr trec, outer, q;
trec = StgTSO_trec(CurrentTSO);
outer = StgTRecHeader_enclosing_trec(trec);
if (outer == NO_TREC) {
/* First time back at the atomically frame -- pick up invariants */
("ptr" next_invariant) =
ccall stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr");
frame_result = result;
/* Back at the atomically frame */
frame_result = result;
/* try to commit */
(valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
if (valid != 0) {
/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
return (frame_result);
} else {
/* Second/subsequent time back at the atomically frame -- abort the
* tx that's checking the invariant and move on to the next one */
StgTSO_trec(CurrentTSO) = outer;
StgInvariantCheckQueue_my_execution(next_invariant) = trec;
ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
/* Don't free trec -- it's linked from q and will be stashed in the
* invariant if we eventually commit. */
next_invariant =
StgInvariantCheckQueue_next_queue_entry(next_invariant);
trec = outer;
}
if (next_invariant != END_INVARIANT_CHECK_QUEUE) {
/* We can't commit yet: another invariant to check */
("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", trec "ptr");
/* Transaction was not valid: try again */
("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr",
NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
q = StgInvariantCheckQueue_invariant(next_invariant);
jump stg_ap_v_fast
// push the StgAtomicallyFrame again: the code generator is
// clever enough to only assign the fields that have changed.
(ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
code,next_invariant,frame_result))
(StgAtomicInvariant_code(q));
} else {
/* We've got no more invariants to check, try to commit */
(valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
if (valid != 0) {
/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
return (frame_result);
} else {
/* Transaction was not valid: try again */
("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr",
NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
next_invariant = END_INVARIANT_CHECK_QUEUE;
jump stg_ap_v_fast
// push the StgAtomicallyFrame again: the code generator is
// clever enough to only assign the fields that have changed.
(ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
code,next_invariant,frame_result))
(code);
}
code,frame_result))
(code);
}
}
......@@ -1140,7 +1108,6 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
ATOMICALLY_FRAME_FIELDS(W_,P_,
info_ptr, p1, p2,
code,
next_invariant,
frame_result))
return (/* no return values */)
{
......@@ -1152,7 +1119,7 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
/* Previous attempt is still valid: no point trying again yet */
jump stg_block_noregs
(ATOMICALLY_FRAME_FIELDS(,,info_ptr, p1, p2,
code,next_invariant,frame_result))
code,frame_result))
();
} else {
/* Previous attempt is no longer valid: try again */
......@@ -1162,7 +1129,7 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
// change the frame header to stg_atomically_frame_info
jump stg_ap_v_fast
(ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, p1, p2,
code,next_invariant,frame_result))
code,frame_result))
(code);
}
}
......@@ -1213,7 +1180,7 @@ stg_atomicallyzh (P_ stm)
{
P_ old_trec;
P_ new_trec;
P_ code, next_invariant, frame_result;
P_ code, frame_result;
// stmStartTransaction may allocate
MAYBE_GC_P(stg_atomicallyzh, stm);
......@@ -1228,7 +1195,6 @@ stg_atomicallyzh (P_ stm)
}
code = stm;
next_invariant = END_INVARIANT_CHECK_QUEUE;
frame_result = NO_TREC;
/* Start the memory transaction */
......@@ -1237,7 +1203,7 @@ stg_atomicallyzh (P_ stm)
jump stg_ap_v_fast
(ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, CCCS, 0,
code,next_invariant,frame_result))
code,frame_result))
(stm);
}
......@@ -1340,16 +1306,6 @@ retry_pop_stack:
// We've reached the ATOMICALLY_FRAME: attempt to wait
ASSERT(frame_type == ATOMICALLY_FRAME);
if (outer != NO_TREC) {
// We called retry while checking invariants, so abort the current
// invariant check (merging its TVar accesses into the parents read
// set so we'll wait on them)
ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
trec = outer;
StgTSO_trec(CurrentTSO) = trec;
outer = StgTRecHeader_enclosing_trec(trec);
}
ASSERT(outer == NO_TREC);
(r) = ccall stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr");
......@@ -1369,20 +1325,6 @@ retry_pop_stack:
}
}
stg_checkzh (P_ closure /* STM a */)
{
W_ trec;
MAYBE_GC_P (stg_checkzh, closure);
trec = StgTSO_trec(CurrentTSO);
ccall stmAddInvariantToCheck(MyCapability() "ptr",
trec "ptr",
closure "ptr");
return ();
}
stg_newTVarzh (P_ init)
{
W_ tv;
......
......@@ -558,7 +558,6 @@
SymI_HasProto(stg_catchzh) \
SymI_HasProto(stg_catchRetryzh) \
SymI_HasProto(stg_catchSTMzh) \
SymI_HasProto(stg_checkzh) \
SymI_HasProto(stg_clearCCSzh) \
SymI_HasProto(stg_compactAddWithSharingzh) \
SymI_HasProto(stg_compactAddzh) \
......
......@@ -211,15 +211,6 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
return (result == expected);
}
static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- uniproc
return true;
}
static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- uniproc
}
#endif
#if defined(STM_CG_LOCK) /*........................................*/
......@@ -272,15 +263,6 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
TRACE("%p : %d", result ? "success" : "failure");
return (result == expected);
}
static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- protected by STM lock
return true;
}
static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- protected by STM lock
}
#endif
#if defined(STM_FG_LOCKS) /*...................................*/
......@@ -332,32 +314,10 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec,
TRACE("%p : %s", trec, result ? "success" : "failure");
return (result == expected);
}
static StgBool lock_inv(StgAtomicInvariant *inv) {
return (cas(&(inv -> lock), 0, 1) == 0);
}
static void unlock_inv(StgAtomicInvariant *inv) {
ASSERT(inv -> lock == 1);
inv -> lock = 0;
}
#endif
/*......................................................................*/
static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
StgClosure *c = q -> closure;
const StgInfoTable *info = get_itbl(c);
return (info -> type) == TSO;
}
static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
StgClosure *c = q -> closure;
return (c->header.info == &stg_ATOMIC_INVARIANT_info);
}
/*......................................................................*/
// Helper functions for thread blocking and unblocking
static void park_tso(StgTSO *tso) {
......@@ -406,9 +366,7 @@ static void unpark_waiters_on(Capability *cap, StgTVar *s) {
for (;
q != END_STM_WATCH_QUEUE;
q = q -> prev_queue_entry) {
if (watcher_is_tso(q)) {
unpark_tso(cap, (StgTSO *)(q -> closure));
}
}
}
......@@ -416,16 +374,6 @@ static void unpark_waiters_on(Capability *cap, StgTVar *s) {
// Helper functions for downstream allocation and initialization
static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
StgAtomicInvariant *invariant) {
StgInvariantCheckQueue *result;
result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
result -> invariant = invariant;
result -> my_execution = NO_TREC;
return result;
}
static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
StgClosure *closure) {
StgTVarWatchQueue *result;
......@@ -452,7 +400,6 @@ static StgTRecHeader *new_stg_trec_header(Capability *cap,
result -> enclosing_trec = enclosing_trec;
result -> current_chunk = new_stg_trec_chunk(cap);
result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
if (enclosing_trec == NO_TREC) {
result -> state = TREC_ACTIVE;
......@@ -470,20 +417,6 @@ static StgTRecHeader *new_stg_trec_header(Capability *cap,
// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used
static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
StgAtomicInvariant *invariant) {
StgInvariantCheckQueue *result = NULL;
if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
result = new_stg_invariant_check_queue(cap, invariant);
} else {
result = cap -> free_invariant_check_queues;
result -> invariant = invariant;
result -> my_execution = NO_TREC;
cap -> free_invariant_check_queues = result -> next_queue_entry;
}
return result;
}
static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
StgClosure *closure) {
StgTVarWatchQueue *result = NULL;
......@@ -536,7 +469,6 @@ static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
cap -> free_trec_headers = result -> enclosing_trec;
result -> enclosing_trec = enclosing_trec;
result -> current_chunk -> next_entry_idx = 0;
result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
if (enclosing_trec == NO_TREC) {
result -> state = TREC_ACTIVE;
} else {
......@@ -1111,202 +1043,8 @@ static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeade
/*......................................................................*/
/*
 * Add/remove links between an invariant and its TVars. The caller must have
* locked the TVars involved and the invariant.
*/
static void disconnect_invariant(Capability *cap,
StgAtomicInvariant *inv) {
StgTRecHeader *last_execution = inv -> last_execution;
TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);
FOR_EACH_ENTRY(last_execution, e, {
StgTVar *s = e -> tvar;
StgTVarWatchQueue *q = s -> first_watch_queue_entry;
DEBUG_ONLY( StgBool found = false );
TRACE(" looking for trec on tvar=%p", s);
for (q = s -> first_watch_queue_entry;
q != END_STM_WATCH_QUEUE;
q = q -> next_queue_entry) {
if (q -> closure == (StgClosure*)inv) {
StgTVarWatchQueue *pq;
StgTVarWatchQueue *nq;
nq = q -> next_queue_entry;
pq = q -> prev_queue_entry;
if (nq != END_STM_WATCH_QUEUE) {
nq -> prev_queue_entry = pq;
}
if (pq != END_STM_WATCH_QUEUE) {
pq -> next_queue_entry = nq;
} else {
ASSERT(s -> first_watch_queue_entry == q);
s -> first_watch_queue_entry = nq;
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
}
TRACE(" found it in watch queue entry %p", q);
free_stg_tvar_watch_queue(cap, q);
DEBUG_ONLY( found = true );
break;
}
}
ASSERT(found);
});
inv -> last_execution = NO_TREC;
}
static void connect_invariant_to_trec(Capability *cap,
StgAtomicInvariant *inv,
StgTRecHeader *my_execution) {
TRACE("connecting execution inv=%p trec=%p", inv, my_execution);
ASSERT(inv -> last_execution == NO_TREC);
FOR_EACH_ENTRY(my_execution, e, {
StgTVar *s = e -> tvar;
StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
StgTVarWatchQueue *fq = s -> first_watch_queue_entry;
// We leave "last_execution" holding the values that will be
// in the heap after the transaction we're in the process
// of committing has finished.
TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
if (entry != NULL) {
e -> expected_value = entry -> new_value;
e -> new_value = entry -> new_value;
}
TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
q -> next_queue_entry = fq;
q -> prev_queue_entry = END_STM_WATCH_QUEUE;
if (fq != END_STM_WATCH_QUEUE) {
fq -> prev_queue_entry = q;
}
s -> first_watch_queue_entry = q;
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
});
inv -> last_execution = my_execution;
}
/*
* Add a new invariant to the trec's list of invariants to check on commit
*/
void stmAddInvariantToCheck(Capability *cap,
StgTRecHeader *trec,
StgClosure *code) {
StgAtomicInvariant *invariant;
StgInvariantCheckQueue *q;
TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
ASSERT(trec != NO_TREC);
ASSERT(trec -> state == TREC_ACTIVE ||
trec -> state == TREC_CONDEMNED);
// 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
// to signal that this is a new invariant in the current atomic block
invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));