%
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
% $Id: CgHeapery.lhs,v 1.47 2005/06/21 10:44:41 simonmar Exp $
%
\section[CgHeapery]{Heap management functions}

\begin{code}
module CgHeapery (
	initHeapUsage, getVirtHp, setVirtHp, setRealHp, 
	getHpRelOffset,	hpRel,

	funEntryChecks, thunkEntryChecks, 
	altHeapCheck, unbxTupleHeapCheck, 
	hpChkGen, hpChkNodePointsAssignSp0,
	stkChkGen, stkChkNodePoints,

	layOutDynConstr, layOutStaticConstr,
	mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

	allocDynClosure, emitSetDynHdr
    ) where

#include "HsVersions.h"

import StgSyn		( AltType(..) )
import CLabel		( CLabel, mkRtsCodeLabel )
import CgUtils		( mkWordCLit, cmmRegOffW, cmmOffsetW,
			  cmmOffsetExprB )
import CgMonad
import CgProf		( staticProfHdr, profDynAlloc, dynProfHdr )
import CgTicky		( staticTickyHdr, tickyDynAlloc, tickyAllocHeap )
import CgParallel	( staticGranHdr, staticParHdr, doGranAllocate )
import CgStackery	( getFinalStackHW, getRealSp )
import CgCallConv	( mkRegLiveness )
import ClosureInfo	( closureSize, staticClosureNeedsLink, 
			  mkConInfo,  closureNeedsUpdSpace,
			  infoTableLabelFromCI, closureLabelFromCI,
			  nodeMustPointToIt, closureLFInfo, 			
			  ClosureInfo )
import SMRep		( CgRep(..), cgRepSizeW, separateByPtrFollowness,
			  WordOff, fixedHdrSize, thunkHdrSize,
			  isVoidArg, primRepToCgRep )

import Cmm		( CmmLit(..), CmmStmt(..), CmmExpr(..), GlobalReg(..),
			  CmmReg(..), hpReg, nodeReg, spReg )
import MachOp		( mo_wordULt, mo_wordUGt, mo_wordSub )
import CmmUtils		( mkIntCLit, CmmStmts, noStmts, oneStmt, plusStmts,
			  mkStmts )
import Id		( Id )
import DataCon		( DataCon )
import TyCon		( tyConPrimRep )
import CostCentre	( CostCentreStack )
import Util		( mapAccumL, filterOut )
import Constants	( wORD_SIZE )
import Packages		( HomeModules )
import Outputable

\end{code}


%************************************************************************
%*									*
\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
%*									*
%************************************************************************

The heap always grows upwards, so hpRel is easy

\begin{code}
hpRel :: VirtualHpOffset 	-- virtual offset of Hp
      -> VirtualHpOffset 	-- virtual offset of The Thing
      -> WordOff			-- integer word offset
hpRel hp off = off - hp
\end{code}
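
For example, if the real Hp currently corresponds to virtual offset 5, then a
word at virtual offset 8 lives three words above Hp:

\begin{verbatim}
	hpRel 5 8  ==  3	-- the word is addressed as Hp[3]
\end{verbatim}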

@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zero, and restores the original heap
usage afterwards, so the caller's heap usage is left unchanged.

It is usually a prelude to performing a GC check, so everything must
be in a tidy and consistent state.

rje: Note the slightly subtle fixed-point behaviour needed here: the
body is handed the final heap high-water mark, which is only known
once the body itself has been compiled, so fixC is used to tie the
knot.

\begin{code}
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do	{ orig_hp_usage <- getHpUsage
	; setHpUsage initHpUsage
	; fixC (\heap_usage2 -> do
		{ fcode (heapHWM heap_usage2)
		; getHpUsage })
	; setHpUsage orig_hp_usage }

setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {virtHp = new_virtHp}) }

getVirtHp :: FCode VirtualHpOffset
getVirtHp 
  = do	{ hp_usage <- getHpUsage
	; return (virtHp hp_usage) }

setRealHp ::  VirtualHpOffset -> Code
setRealHp new_realHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {realHp = new_realHp}) }

getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
getHpRelOffset virtual_offset
  = do	{ hp_usg <- getHpUsage
	; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
\end{code}
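
To see the knot-tying in isolation, here is a minimal standalone sketch (it
is not part of this module, and the names are made up): the same pattern
written with MonadFix's @mfix@ over a lazy State monad, standing in for
@fixC@ over the FCode heap usage.  As in @hpStkCheck@ below, the body must
use the value it is handed lazily, or the fixed point diverges.

\begin{verbatim}
import Control.Monad.Fix        (mfix)
import Control.Monad.State.Lazy

type Usage = Int    -- stands in for the heap high-water mark

-- Run a body that is told, up front, the final usage it will have
-- accumulated; afterwards the caller's usage is restored unchanged.
withFinalUsage :: (Usage -> State Usage a) -> State Usage a
withFinalUsage body = do
  orig <- get
  put 0                              -- start the body from zero usage
  r <- mfix (\ ~(_, final) -> do     -- 'final' may only be used lazily
         a <- body final
         u <- get                    -- usage the body actually accumulated
         return (a, u))
  put orig
  return (fst r)

-- e.g. runState (withFinalUsage (\hwm -> do { modify (+3); return hwm })) 0
--        == (3, 0)
\end{verbatim}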


%************************************************************************
%*									*
		Layout of heap objects
%*									*
%************************************************************************

\begin{code}
layOutDynConstr, layOutStaticConstr
	:: HomeModules
	-> DataCon 	
	-> [(CgRep,a)]
	-> (ClosureInfo,
	    [(a,VirtualHpOffset)])

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True

layOutConstr  is_static hmods data_con args
   = (mkConInfo hmods is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,		 --  #ptr_wds + #nonptr_wds
     ptr_wds,		 --  #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
\end{code}

@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list increase from front to back: the first thing gets the lowest offset.

\begin{code}
mkVirtHeapOffsets
	  :: Bool		-- True <=> is a thunk
	  -> [(CgRep,a)]	-- Things to make offsets for
	  -> (WordOff,		-- _Total_ number of words allocated
	      WordOff,		-- Number of words allocated for *pointers*
	      [(a, VirtualHpOffset)])
				-- Things with their offsets from start of 
				--  object in order of increasing offset

-- First in list gets lowest offset, namely hdr_size: the first word after the header.

mkVirtHeapOffsets is_thunk things
  = let non_void_things		      = filterOut (isVoidArg . fst) things
	(ptrs, non_ptrs)    	      = separateByPtrFollowness non_void_things
    	(wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
	(tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    hdr_size 	| is_thunk   = thunkHdrSize
		| otherwise  = fixedHdrSize

    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, hdr_size + wds_so_far))
\end{code}
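
As a standalone illustration of the layout rule (a simplified model, not
part of this module: a toy @Rep@ type stands in for @CgRep@, and every field
is assumed to be one word wide):

\begin{verbatim}
import Data.List (mapAccumL, partition)

data Rep = Ptr | NonPtr deriving Eq

-- Toy mkVirtHeapOffsets: pointers first, then non-pointers, with
-- offsets measured in words from the start of the object.
mkOffsets :: Int                     -- header size in words
          -> [(Rep, a)]              -- fields
          -> (Int, Int, [(a, Int)])  -- (total words, pointer words, offsets)
mkOffsets hdr things = (tot_wds, ptr_wds, ptr_offs ++ nonptr_offs)
  where
    (ptrs, non_ptrs)       = partition ((== Ptr) . fst) things
    (ptr_wds, ptr_offs)    = mapAccumL step 0 ptrs
    (tot_wds, nonptr_offs) = mapAccumL step ptr_wds non_ptrs
    step wds (_, x)        = (wds + 1, (x, hdr + wds))

-- e.g. with a one-word header:
--   mkOffsets 1 [(Ptr,"a"), (NonPtr,"b"), (Ptr,"c")]
--     == (3, 2, [("a",1), ("c",2), ("b",3)])
\end{verbatim}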


%************************************************************************
%*									*
		Lay out a static closure
%*									*
%************************************************************************

Make a static closure, adding on any extra padding needed for CAFs,
and adding a static link field if necessary.

\begin{code}
mkStaticClosureFields 
	:: ClosureInfo 
	-> CostCentreStack 
	-> Bool 		-- Has CAF refs
	-> [CmmLit]		-- Payload
	-> [CmmLit]		-- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds 
	static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info

    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the same
    -- place.  So we use closureNeedsUpdSpace rather than
    -- closureUpdReqd here:

    is_caf = closureNeedsUpdSpace cl_info

    padding_wds
	| not is_caf = []
	| otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
	| is_caf || staticClosureNeedsLink cl_info = [static_link_value]
	| otherwise				   = []

    saved_info_field
	| is_caf     = [mkIntCLit 0]
	| otherwise  = []

	-- for a static constructor which has NoCafRefs, we set the
	-- static link field to a non-zero value so the garbage
	-- collector will ignore it.
    static_link_value
	| caf_refs	= mkIntCLit 0
	| otherwise	= mkIntCLit 1

mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
	=  staticGranHdr
	++ staticParHdr
	++ staticProfHdr ccs
	++ staticTickyHdr
\end{code}
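
For instance (a sketch only, ignoring the optional GranSim/parallel/profiling/
ticky header words, and with a hypothetical info label @foo_static_info@), a
CAF has an empty payload, so its word list comes out as

\begin{verbatim}
	[ CmmLabel foo_static_info	-- info pointer
	, mkIntCLit 0			-- padding, leaving room for the indirectee
	, static_link_value		-- static link
	, mkIntCLit 0 ]			-- saved_info
\end{verbatim}

whereas a static constructor that has no CAF refs is just the info pointer
followed by its payload, plus a static link field only when
@staticClosureNeedsLink@ says one is needed.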

%************************************************************************
%*									*
\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
%*									*
%************************************************************************

The new code for heap checks.  For GrAnSim the code for doing a heap check
and the code for doing a context switch have been separated: the HEAP_CHK
macro only performs a heap check, and THREAD_CONTEXT_SWITCH should be used
for doing a context switch.  GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures.  If fetching is necessary (i.e. the current closure is not local)
then an automatic context switch is done.

--------------------------------------------------------------
A heap/stack check at a function or thunk entry point.

\begin{code}
funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
funEntryChecks cl_info reg_save_code code 
  = hpStkCheck cl_info True reg_save_code code

thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code 
  = hpStkCheck cl_info False noStmts code

hpStkCheck :: ClosureInfo	-- Function closure
	   -> Bool 		-- Is a function? (not a thunk)
	   -> CmmStmts		-- Register saves
	   -> Code
	   -> Code

hpStkCheck cl_info is_fun reg_save_code code
  =  getFinalStackHW	$ \ spHw -> do
	{ sp <- getRealSp
	; let stk_words = spHw - sp
	; initHeapUsage	$ \ hpHw  -> do
	    {	-- Emit heap checks, but be sure to do it lazily so 
		-- that the conditionals on hpHw don't cause a black hole
	      codeOnly $ do
		{ do_checks stk_words hpHw full_save_code rts_label
		; tickyAllocHeap hpHw }
	    ; setRealHp hpHw
	    ; code }
	}
  where
    node_asst 
	| nodeMustPointToIt (closureLFInfo cl_info)
	= noStmts
	| otherwise
	= oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
    closure_lbl = closureLabelFromCI cl_info

    full_save_code = node_asst `plusStmts` reg_save_code

    rts_label | is_fun    = CmmReg (CmmGlobal GCFun)
				-- Function entry point
	      | otherwise = CmmReg (CmmGlobal GCEnter1)
				-- Thunk or case return
	-- In the thunk/case-return case, R1 points to a closure
	-- which should be (re)-entered after GC
\end{code}

Heap checks in a case alternative are nice and easy, provided this is
a bog-standard algebraic case.  We have in our hand:

       * one return address, on the stack,
       * one return value, in Node.

the canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return.  The scheduler will return by
entering the top value on the stack, which in turn will return through
the return address, getting us back to where we were.  This is
therefore only valid if the return value is *lifted* (just being
boxed isn't good enough).

For primitive returns, we have an unlifted value in some register
(either R1 or FloatReg1 or DblReg1).  This means using specialised
heap-check code for these cases.

\begin{code}
altHeapCheck 
    :: AltType	-- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
		--	(Unboxed tuples are dealt with by ubxTupleHeapCheck)
    -> Code	-- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do
	     { do_checks 0 {- no stack chk -} hpHw
			 noStmts {- nothing to save -}
			 (rts_label alt_type)
	     ; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
  where
    rts_label PolyAlt = CmmLit (CmmLabel (mkRtsCodeLabel SLIT( "stg_gc_unpt_r1")))
      	-- Do *not* enter R1 after a heap check in
	-- a polymorphic case.  It might be a function
	-- and the entry code for a function (currently)
	-- applies it
	--
	-- However R1 is guaranteed to be a pointer

    rts_label (AlgAlt tc) = stg_gc_enter1
	-- Enter R1 after the heap check; it's a pointer
 	
    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $ 
	case primRepToCgRep (tyConPrimRep tc) of
	  VoidArg   -> mkRtsCodeLabel SLIT( "stg_gc_noregs")
	  FloatArg  -> mkRtsCodeLabel SLIT( "stg_gc_f1")
	  DoubleArg -> mkRtsCodeLabel SLIT( "stg_gc_d1")
	  LongArg   -> mkRtsCodeLabel SLIT( "stg_gc_l1")
				-- R1 is boxed but unlifted: 
	  PtrArg    -> mkRtsCodeLabel SLIT( "stg_gc_unpt_r1")
				-- R1 is unboxed:
	  NonPtrArg -> mkRtsCodeLabel SLIT( "stg_gc_unbx_r1")

    rts_label (UbxTupAlt _) = panic "altHeapCheck"
\end{code}


Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!)  For unboxed tuple returns, there
are an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack.  We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.

\begin{code}
unbxTupleHeapCheck 
	:: [(Id, GlobalReg)]	-- Live registers
	-> WordOff	-- no. of stack slots containing ptrs
	-> WordOff	-- no. of stack slots containing nonptrs
	-> CmmStmts	-- code to insert in the failure path
	-> Code
	-> Code

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers 
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "unbxTupleHeapCheck"
  | otherwise 
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do { do_checks 0 {- no stack check -} hpHw
				    full_fail_code rts_label
			; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9)) 	-- Ho ho ho!
				(CmmLit (mkWordCLit liveness))
    liveness 	    = mkRegLiveness regs ptrs nptrs
    rts_label	    = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_ut")))

\end{code}


%************************************************************************
%*									*
		Heap/Stack Checks.
%*									*
%************************************************************************

When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.
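
Schematically, the code emitted by @do_checks'@ below has this shape for a
combined stack and heap check (a sketch only; <fail> stands for the forked
failure block):

\begin{verbatim}
	if (Sp - stk_bytes < SpLim) goto fail	-- stack check comes first
	Hp = Hp + hp_bytes			-- bump Hp only if that succeeded
	if (Hp > HpLim) goto fail		-- then the heap check
	<fast path continues>
  fail:
	HpAlloc = hp_bytes
	<register saves>
	jump <rts label>
\end{verbatim}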

\begin{code}
do_checks :: WordOff	-- Stack headroom
	  -> WordOff	-- Heap  headroom
	  -> CmmStmts	-- Assignments to perform on failure
	  -> CmmExpr	-- Rts address to jump to on failure
	  -> Code
do_checks 0 0 _ _   = nopC
do_checks stk hp reg_save_code rts_lbl
  = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE))) 
	       (CmmLit (mkIntCLit (hp*wORD_SIZE)))
	 (stk /= 0) (hp /= 0) reg_save_code rts_lbl

-- The offsets are now in *bytes*
do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
  = do	{ doGranAllocate hp_expr

	-- Emit a block for the heap-check-failure code
	; blk_id <- forkLabelledCode $ do
			{ whenC hp_nonzero $
				stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
			; emitStmts reg_save_code
			; stmtC (CmmJump rts_lbl []) }

	-- Check for stack overflow *FIRST*; otherwise
	-- we might bump Hp and then fail the stack overflow check
	; whenC stk_nonzero
		(stmtC (CmmCondBranch stk_oflo blk_id))

	; whenC hp_nonzero
		(stmtsC [CmmAssign hpReg 
				(cmmOffsetExprB (CmmReg hpReg) hp_expr),
		        CmmCondBranch hp_oflo blk_id]) 
		-- Bump heap pointer, and test for heap exhaustion
		-- Note that we don't move the heap pointer unless the 
		-- stack check succeeds.  Otherwise we might end up
		-- with slop at the end of the current block, which can 
		-- confuse the LDV profiler.
    }
  where
	-- Stk overflow if (Sp - stk_bytes < SpLim)
    stk_oflo = CmmMachOp mo_wordULt 
		  [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
		   CmmReg (CmmGlobal SpLim)]

	-- Hp overflow if (Hp > HpLim)
	-- (Hp has been incremented by now)
	-- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt 
		  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
\end{code}

%************************************************************************
%*									*
     Generic Heap/Stack Checks - used in the RTS
%*									*
%************************************************************************

\begin{code}
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [
    		CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
    		CmmAssign (CmmGlobal (VanillaReg 10)) reentry
		]

-- a heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)

stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [
    		CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
    		CmmAssign (CmmGlobal (VanillaReg 10)) reentry
		]

stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1

stg_gc_gen = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_gen")))
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
\end{code}

%************************************************************************
%*									*
\subsection[initClosure]{Initialise a dynamic closure}
%*									*
%************************************************************************

@allocDynClosure@ puts the thing in the heap, bumps the virtual Hp to
account for it, and returns the virtual offset of the object's first word.
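
As a standalone toy model of the virtual-Hp bookkeeping (simplified,
hypothetical types; the real code threads a HeapUsage record through FCode
and also lays out the header and payload words via @hpStore@):

\begin{verbatim}
import Control.Monad.State

type VirtHp = Int   -- virtual offset of the last allocated word

-- Allocate an object of the given size in words and return the virtual
-- offset of its first (info-pointer) word, as allocDynClosure does.
allocWords :: Int -> State VirtHp Int
allocWords size = do
  virt_hp <- get                  -- points 1 *before* the new object
  let info_offset = virt_hp + 1   -- first word of the new object
  put (virt_hp + size)            -- bump past the whole object
  return info_offset

-- e.g. starting from an empty virtual heap:
--   evalState (do { a <- allocWords 2; b <- allocWords 3; return (a, b) }) 0
--     == (1, 3)
\end{verbatim}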

\begin{code}
allocDynClosure
	:: ClosureInfo
	-> CmmExpr 		-- Cost Centre to stick in the object
	-> CmmExpr 		-- Cost Centre to blame for this alloc
				-- (usually the same; sometimes "OVERHEAD")

	-> [(CmmExpr, VirtualHpOffset)]	-- Offsets from start of the object
					-- ie Info ptr has offset zero.
	-> FCode VirtualHpOffset	-- Returns virt offset of object

allocDynClosure cl_info use_cc blame_cc amodes_with_offsets
  = do	{ virt_hp <- getVirtHp

	-- FIND THE OFFSET OF THE INFO-PTR WORD
	; let	info_offset = virt_hp + 1
		-- info_offset is the VirtualHpOffset of the first
		-- word of the new object
		-- Remember, virtHp points to last allocated word, 
		-- ie 1 *before* the info-ptr word of new object.

		info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
		hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]

	-- SAY WHAT WE ARE ABOUT TO DO
	; profDynAlloc cl_info use_cc	
		-- ToDo: This is almost certainly wrong
		-- We're ignoring blame_cc. But until we've
		-- fixed the boxing hack in chooseDynCostCentres etc,
		-- we're worried about making things worse by "fixing"
		-- this part to use blame_cc!

	; tickyDynAlloc cl_info

	-- ALLOCATE THE OBJECT
	; base <- getHpRelOffset info_offset
	; hpStore base (hdr_w_offsets ++ amodes_with_offsets)

	-- BUMP THE VIRTUAL HEAP POINTER
	; setVirtHp (virt_hp + closureSize cl_info)
	
	-- RETURN PTR TO START OF OBJECT
	; returnFC info_offset }


initDynHdr :: CmmExpr 
	   -> CmmExpr		-- Cost centre to put in object
	   -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
     	-- ToDo: Gransim stuff
	-- ToDo: Parallel stuff
  ++ dynProfHdr cc
	-- No ticky header

hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val 
	   | (val, off) <- es ]

emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
emitSetDynHdr base info_ptr ccs 
  = hpStore base (zip (initDynHdr info_ptr ccs) [0..])
\end{code}