%
% (c) The University of Glasgow 2006
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%

\section[CgHeapery]{Heap management functions}

\begin{code}
{-# OPTIONS -fno-warn-tabs #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details

module CgHeapery (
	initHeapUsage, getVirtHp, setVirtHp, setRealHp, 
	getHpRelOffset,	hpRel,

	funEntryChecks, thunkEntryChecks, 
	altHeapCheck, unbxTupleHeapCheck, 
	hpChkGen, hpChkNodePointsAssignSp0,
	stkChkGen, stkChkNodePoints,

	layOutDynConstr, layOutStaticConstr,
	mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

	allocDynClosure, emitSetDynHdr
    ) where

#include "HsVersions.h"

import StgSyn
import CLabel
import CgUtils
import CgMonad
import CgProf
import CgTicky
import CgParallel
import CgStackery
import CgCallConv
import ClosureInfo
import SMRep

import OldCmm
import OldCmmUtils
import Id
import DataCon
import TyCon
import CostCentre
import Util
import Module
import Constants
import Outputable
import FastString

import Data.List
\end{code}


%************************************************************************
%*									*
\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
%*									*
%************************************************************************

The heap always grows upwards, so hpRel is easy.

\begin{code}
hpRel :: VirtualHpOffset 	-- virtual offset of Hp
      -> VirtualHpOffset 	-- virtual offset of The Thing
      -> WordOff			-- integer word offset
hpRel hp off = off - hp
\end{code}
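
For example, if the real Hp register corresponds to virtual offset 5 and
The Thing sits at virtual offset 8, then

\begin{verbatim}
    hpRel 5 8  ==  3    -- The Thing is 3 words above where Hp points
\end{verbatim}

so @getHpRelOffset 8@ (below) would produce @cmmRegOffW hpReg 3@.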

@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zeros, and passes on an unchanged
heap usage.

It is usually a prelude to performing a GC check, so everything must
be in a tidy and consistent state.

rje: Note the slightly subtle fixed point behaviour needed here

\begin{code}
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do	{ orig_hp_usage <- getHpUsage
	; setHpUsage initHpUsage
	; fixC_(\heap_usage2 -> do
		{ fcode (heapHWM heap_usage2)
		; getHpUsage })
	; setHpUsage orig_hp_usage }

setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {virtHp = new_virtHp}) }

getVirtHp :: FCode VirtualHpOffset
getVirtHp 
  = do	{ hp_usage <- getHpUsage
	; return (virtHp hp_usage) }

setRealHp ::  VirtualHpOffset -> Code
setRealHp new_realHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {realHp = new_realHp}) }

getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
getHpRelOffset virtual_offset
  = do	{ hp_usg <- getHpUsage
	; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
\end{code}
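
To see the fixed point at work, here is a sketch (not part of the module)
of the typical calling pattern, following @hpStkCheck@ and @altHeapCheck@
below; @rts_lbl@ stands for whatever RTS stub the caller jumps to on
check failure:

\begin{verbatim}
    initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do_checks 0 hpHw noStmts rts_lbl	-- check sized by final hpHw
	; setRealHp hpHw
	; code }					-- allocations happen here
\end{verbatim}

The heap check emitted up front is sized by the high-water mark that
@code@ itself produces, which is why the continuation must not force
@hpHw@ eagerly; @fixC_@ ties the knot lazily.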


%************************************************************************
%*									*
		Layout of heap objects
%*									*
%************************************************************************

\begin{code}
layOutDynConstr, layOutStaticConstr
	:: DataCon
	-> [(CgRep,a)]
	-> (ClosureInfo,
	    [(a,VirtualHpOffset)])

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True

layOutConstr :: Bool -> DataCon -> [(CgRep, a)]
             -> (ClosureInfo, [(a, VirtualHpOffset)])
layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,		 --  #ptr_wds + #nonptr_wds
     ptr_wds,		 --  #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
\end{code}

@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list are in increasing order.

\begin{code}
mkVirtHeapOffsets
	  :: Bool		-- True <=> is a thunk
	  -> [(CgRep,a)]	-- Things to make offsets for
	  -> (WordOff,		-- _Total_ number of words allocated
	      WordOff,		-- Number of words allocated for *pointers*
	      [(a, VirtualHpOffset)])
				-- Things with their offsets from start of 
				--  object in order of increasing offset

-- First in the list gets the lowest offset, namely the header size in words.

mkVirtHeapOffsets is_thunk things
  = let non_void_things		      = filterOut (isVoidArg . fst) things
	(ptrs, non_ptrs)    	      = separateByPtrFollowness non_void_things
    	(wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
	(tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    hdr_size 	| is_thunk   = thunkHdrSize
		| otherwise  = fixedHdrSize

    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, hdr_size + wds_so_far))
\end{code}
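
A worked example (a sketch; @p1@, @p2@, @n1@ stand for arbitrary payload
amodes): for a constructor (not a thunk), and assuming @fixedHdrSize@ is
one word (no profiling or GranSim headers),

\begin{verbatim}
    mkVirtHeapOffsets False [(NonPtrArg,n1), (PtrArg,p1), (PtrArg,p2)]
      == (3, 2, [(p1,1), (p2,2), (n1,3)])
\end{verbatim}

Three words of payload, two of them pointers; the pointers come first,
at the offsets just past the one-word header.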


%************************************************************************
%*									*
		Lay out a static closure
%*									*
%************************************************************************

Make a static closure, adding on any extra padding needed for CAFs,
and adding a static link field if necessary.

\begin{code}
mkStaticClosureFields 
	:: ClosureInfo 
	-> CostCentreStack 
	-> Bool 		-- Has CAF refs
	-> [CmmLit]		-- Payload
	-> [CmmLit]		-- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds 
	static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info

    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the same
    -- place.  So we use closureNeedsUpdSpace rather than
    -- closureUpdReqd here:

    is_caf = closureNeedsUpdSpace cl_info

    padding_wds
	| not is_caf = []
	| otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
	| is_caf || staticClosureNeedsLink cl_info = [static_link_value]
	| otherwise				   = []

    saved_info_field
	| is_caf     = [mkIntCLit 0]
	| otherwise  = []

	-- for a static constructor which has NoCafRefs, we set the
	-- static link field to a non-zero value so the garbage
	-- collector will ignore it.
    static_link_value
	| caf_refs	= mkIntCLit 0
	| otherwise	= mkIntCLit 1

mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ concatMap padLitToWord payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
	=  staticGranHdr
	++ staticParHdr
	++ staticProfHdr ccs
	++ staticTickyHdr

-- Pad a literal out to a full word, adding zero padding after it if needed
padLitToWord :: CmmLit -> [CmmLit]
padLitToWord lit = lit : padding pad_length
  where width = typeWidth (cmmLitType lit)
        pad_length = wORD_SIZE - widthInBytes width :: Int

        padding n | n <= 0 = []
                  | n `rem` 2 /= 0 = CmmInt 0 W8  : padding (n-1)
                  | n `rem` 4 /= 0 = CmmInt 0 W16 : padding (n-2)
                  | n `rem` 8 /= 0 = CmmInt 0 W32 : padding (n-4)
                  | otherwise      = CmmInt 0 W64 : padding (n-8)
\end{code}
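
For instance, a sketch assuming a 64-bit target (@wORD_SIZE@ = 8): a
32-bit literal picks up one 32-bit zero of padding, keeping every payload
entry word-aligned:

\begin{verbatim}
    padLitToWord (CmmInt 42 W32)  ==  [CmmInt 42 W32, CmmInt 0 W32]
\end{verbatim}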

%************************************************************************
%*									*
\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
%*									*
%************************************************************************

The new code for heap checks. For GrAnSim the code for doing a heap check
and doing a context switch has been separated. In particular, the HEAP_CHK
macro only performs a heap check; THREAD_CONTEXT_SWITCH should be used for
doing a context switch. GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures. If fetching is necessary (i.e. the current closure is not local),
an automatic context switch is done.

--------------------------------------------------------------
A heap/stack check at a function or thunk entry point.

\begin{code}
funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
funEntryChecks cl_info reg_save_code code 
  = hpStkCheck cl_info True reg_save_code code

thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code 
  = hpStkCheck cl_info False noStmts code

hpStkCheck :: ClosureInfo	-- Function closure
	   -> Bool 		-- Is a function? (not a thunk)
	   -> CmmStmts		-- Register saves
	   -> Code
	   -> Code

hpStkCheck cl_info is_fun reg_save_code code
  =  getFinalStackHW	$ \ spHw -> do
	{ sp <- getRealSp
	; let stk_words = spHw - sp
	; initHeapUsage	$ \ hpHw  -> do
	    {	-- Emit heap checks, but be sure to do it lazily so 
		-- that the conditionals on hpHw don't cause a black hole
	      codeOnly $ do
		{ do_checks stk_words hpHw full_save_code rts_label
		; tickyAllocHeap hpHw }
	    ; setRealHp hpHw
	    ; code }
	}
  where
    node_asst 
	| nodeMustPointToIt (closureLFInfo cl_info)
	= noStmts
	| otherwise
	= oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
        -- Strictly speaking, we should tag node here.  But if
        -- node doesn't point to the closure, the code for the closure
        -- cannot depend on the value of R1 anyway, so we're safe.
    closure_lbl = closureLabelFromCI cl_info

    full_save_code = node_asst `plusStmts` reg_save_code

    rts_label | is_fun    = CmmReg (CmmGlobal GCFun)
				-- Function entry point
	      | otherwise = CmmReg (CmmGlobal GCEnter1)
				-- Thunk or case return
	-- In the thunk/case-return case, R1 points to a closure
	-- which should be (re)-entered after GC
\end{code}

Heap checks in a case alternative are nice and easy, provided this is
a bog-standard algebraic case.  We have in our hand:

       * one return address, on the stack,
       * one return value, in Node.

The canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return.  The scheduler will return by
entering the top value on the stack, which in turn will return through
the return address, getting us back to where we were.  This is
therefore only valid if the return value is *lifted* (just being
boxed isn't good enough).

For primitive returns, we have an unlifted value in some register
(either R1 or FloatReg1 or DblReg1).  This means using specialised
heap-check code for these cases.

\begin{code}
altHeapCheck 
    :: AltType	-- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
		--	(unboxed tuples are dealt with by unbxTupleHeapCheck)
    -> Code	-- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do
	     { do_checks 0 {- no stack chk -} hpHw
			 noStmts {- nothign to save -}
			 (rts_label alt_type)
	     ; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
356
  where
357
    rts_label PolyAlt = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unpt_r1")))
358 359 360 361 362 363 364
      	-- Do *not* enter R1 after a heap check in
	-- a polymorphic case.  It might be a function
	-- and the entry code for a function (currently)
	-- applies it
	--
	-- However R1 is guaranteed to be a pointer

    rts_label (AlgAlt _) = stg_gc_enter1
	-- Enter R1 after the heap check; it's a pointer

    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $ 
	case primRepToCgRep (tyConPrimRep tc) of
	  VoidArg   -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_noregs")
	  FloatArg  -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_f1")
	  DoubleArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_d1")
	  LongArg   -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_l1")
				-- R1 is boxed but unlifted: 
	  PtrArg    -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unpt_r1")
				-- R1 is unboxed:
	  NonPtrArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unbx_r1")

    rts_label (UbxTupAlt _) = panic "altHeapCheck"
\end{code}

Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!)  For unboxed tuple returns, there
are an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack.  We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.

\begin{code}
unbxTupleHeapCheck 
	:: [(Id, GlobalReg)]	-- Live registers
	-> WordOff	-- no. of stack slots containing ptrs
	-> WordOff	-- no. of stack slots containing nonptrs
	-> CmmStmts	-- code to insert in the failure path
	-> Code
	-> Code

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers 
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "unbxTupleHeapCheck"
  | otherwise 
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do { do_checks 0 {- no stack check -} hpHw
				    full_fail_code rts_label
			; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9 VNonGcPtr)) 	-- Ho ho ho!
				(CmmLit (mkWordCLit liveness))
    liveness 	    = mkRegLiveness regs ptrs nptrs
    rts_label	    = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_ut")))

\end{code}


%************************************************************************
%*									*
		Heap/Stack Checks.
%*									*
%************************************************************************

When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.

\begin{code}
do_checks :: WordOff	-- Stack headroom
	  -> WordOff	-- Heap  headroom
	  -> CmmStmts	-- Assignments to perform on failure
	  -> CmmExpr	-- Rts address to jump to on failure
	  -> Code
do_checks 0 0 _ _   = nopC

do_checks _ hp _ _
  | hp > bLOCKS_PER_MBLOCK * bLOCK_SIZE_W
  = sorry (unlines [
            "Trying to allocate more than " ++ show (bLOCKS_PER_MBLOCK * bLOCK_SIZE) ++ " bytes.",
            "",
            "See: http://hackage.haskell.org/trac/ghc/ticket/4505",
            "Suggestion: read data from a file instead of having large static data",
            "structures in the code."])

do_checks stk hp reg_save_code rts_lbl
  = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE))) 
	       (CmmLit (mkIntCLit (hp*wORD_SIZE)))
	 (stk /= 0) (hp /= 0) reg_save_code rts_lbl

-- The offsets are now in *bytes*
do_checks' :: CmmExpr -> CmmExpr -> Bool -> Bool -> CmmStmts -> CmmExpr -> Code
do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
  = do	{ doGranAllocate hp_expr

        -- The failure block: this saves the registers and jumps to
        -- the appropriate RTS stub.
        ; exit_blk_id <- forkLabelledCode $ do {
			; emitStmts reg_save_code
			; stmtC (CmmJump rts_lbl []) }

	-- In the case of a heap-check failure, we must also set
	-- HpAlloc.  NB. HpAlloc is *only* set if Hp has been
	-- incremented by the heap check; it must not be set in the
	-- event that a stack check failed, because the RTS stub will
	-- retreat Hp by HpAlloc.
	; hp_blk_id <- if hp_nonzero
                          then forkLabelledCode $ do
				  stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
				  stmtC (CmmBranch exit_blk_id)
                          else return exit_blk_id

	-- Check for stack overflow *FIRST*; otherwise
	-- we might bump Hp and then fail the stack check
	; whenC stk_nonzero
		(stmtC (CmmCondBranch stk_oflo exit_blk_id))

	; whenC hp_nonzero
		(stmtsC [CmmAssign hpReg 
				(cmmOffsetExprB (CmmReg hpReg) hp_expr),
		        CmmCondBranch hp_oflo hp_blk_id])
		-- Bump heap pointer, and test for heap exhaustion
		-- Note that we don't move the heap pointer unless the 
		-- stack check succeeds.  Otherwise we might end up
		-- with slop at the end of the current block, which can 
		-- confuse the LDV profiler.
    }
  where
	-- Stk overflow if (Sp - stk_bytes < SpLim)
    stk_oflo = CmmMachOp mo_wordULt 
		  [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
		   CmmReg (CmmGlobal SpLim)]

	-- Hp overflow if (Hp > HpLim)
	-- (Hp has been incremented by now)
	-- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt 
		  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
\end{code}
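
Schematically, when both checks are live, do_checks' emits code of this
shape (a sketch only; the block names are invented):

\begin{verbatim}
	if (Sp - stk_bytes < SpLim) goto exit_blk	// stack check first
	Hp = Hp + hp_bytes
	if (Hp > HpLim) goto hp_blk			// bump-then-check heap
	<fall through to the block proper>

    hp_blk:   HpAlloc = hp_bytes; goto exit_blk		// only on heap failure
    exit_blk: <reg_save_code>; jump rts_lbl
\end{verbatim}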

%************************************************************************
%*									*
     Generic Heap/Stack Checks - used in the RTS
%*									*
%************************************************************************

\begin{code}
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [ mk_vanilla_assignment 9 liveness,
	    		mk_vanilla_assignment 10 reentry ]

-- a heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)

stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [ mk_vanilla_assignment 9 liveness,
	    		mk_vanilla_assignment 10 reentry ]

mk_vanilla_assignment :: Int -> CmmExpr -> CmmStmt
mk_vanilla_assignment n e
  = CmmAssign (CmmGlobal (VanillaReg n (vgcFlag (cmmExprType e)))) e

stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1

stg_gc_gen :: CmmExpr
stg_gc_gen = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_gen")))

stg_gc_enter1 :: CmmExpr
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
\end{code}

%************************************************************************
%*									*
\subsection[initClosure]{Initialise a dynamic closure}
%*									*
%************************************************************************

@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
to account for this.
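
A small worked trace of the offset arithmetic below (a sketch): suppose
virtHp is 3 on entry and @closureSize cl_info@ is 3 (a one-word header
plus two payload words). Then

\begin{verbatim}
    info_offset = 3 + 1 = 4	-- virtual offset of the new object's info ptr
				-- payload words land at virtual offsets 5 and 6
    new virtHp  = 3 + 3 = 6	-- again points to the last allocated word
\end{verbatim}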

\begin{code}
allocDynClosure
	:: ClosureInfo
	-> CmmExpr 		-- Cost Centre to stick in the object
	-> CmmExpr 		-- Cost Centre to blame for this alloc
				-- (usually the same; sometimes "OVERHEAD")

	-> [(CmmExpr, VirtualHpOffset)]	-- Offsets from start of the object
					-- ie Info ptr has offset zero.
	-> FCode VirtualHpOffset	-- Returns virt offset of object

allocDynClosure cl_info use_cc _blame_cc amodes_with_offsets
  = do	{ virt_hp <- getVirtHp

	-- FIND THE OFFSET OF THE INFO-PTR WORD
	; let	info_offset = virt_hp + 1
		-- info_offset is the VirtualHpOffset of the first
		-- word of the new object
		-- Remember, virtHp points to last allocated word, 
		-- ie 1 *before* the info-ptr word of new object.

		info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
		hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]

	-- SAY WHAT WE ARE ABOUT TO DO
	; profDynAlloc cl_info use_cc
        ; tickyDynAlloc cl_info

	-- ALLOCATE THE OBJECT
	; base <- getHpRelOffset info_offset
	; hpStore base (hdr_w_offsets ++ amodes_with_offsets)

	-- BUMP THE VIRTUAL HEAP POINTER
	; setVirtHp (virt_hp + closureSize cl_info)

	-- RETURN PTR TO START OF OBJECT
	; returnFC info_offset }


initDynHdr :: CmmExpr 
	   -> CmmExpr		-- Cost centre to put in object
	   -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
     	-- ToDo: Gransim stuff
	-- ToDo: Parallel stuff
  ++ dynProfHdr cc
	-- No ticky header

hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val 
	   | (val, off) <- es ]

emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
emitSetDynHdr base info_ptr ccs 
  = hpStore base (zip (initDynHdr info_ptr ccs) [0..])
\end{code}