{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details

-----------------------------------------------------------------------------
--
-- Cmm optimisation
--
-- (c) The University of Glasgow 2006
--
-----------------------------------------------------------------------------

module CmmOpt (
	cmmMiniInline,
	cmmMachOpFold,
	cmmLoopifyForC,
 ) where

#include "HsVersions.h"

import Cmm
import CmmExpr
import CmmUtils
import CLabel
import StaticFlags

import UniqFM
import Unique
import FastTypes
import Outputable

import Data.Bits
import Data.Word
import Data.Int

-- -----------------------------------------------------------------------------
-- The mini-inliner

{-
This pass inlines assignments to temporaries that are used just
once.  It works as follows:

  - count uses of each temporary
  - for each temporary that occurs just once:
	- attempt to push it forward to the statement that uses it
        - only push forward past assignments to other temporaries
	  (assumes that temporaries are single-assignment)
	- if we reach the statement that uses it, inline the rhs
	  and delete the original assignment.

[N.B. In the Quick C-- compiler, this optimization is achieved by a
 combination of two dataflow passes: forward substitution (peephole
 optimization) and dead-assignment elimination.  ---NR]

Possible generalisations: here is an example from factorial

Fac_zdwfac_entry:
    cmG:
        _smi = R2;
        if (_smi != 0) goto cmK;
        R1 = R3;
        jump I64[Sp];
    cmK:
        _smn = _smi * R3;
        R2 = _smi + (-1);
        R3 = _smn;
        jump Fac_zdwfac_info;

We want to inline _smi and _smn.  To inline _smn:

   - we must be able to push forward past assignments to global regs.
     We can do this if the rhs of the assignment we are pushing
     forward doesn't refer to the global reg being assigned to; easy
     to test.

To inline _smi:

   - It is a trivial replacement, reg for reg, but it occurs more than
     once.
   - We can inline trivial assignments even if the temporary occurs
     more than once, as long as we don't eliminate the original assignment
     (this doesn't help much on its own).
   - We need to be able to propagate the assignment forward through jumps;
     if we did this, we would find that it can be inlined safely in all
     its occurrences.
-}
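{- For illustration, here is a minimal before/after of the single-use case
   that the pass does handle today (the temporary _c1 is hypothetical):

       _c1 = R1 + 8;
       R2 = I64[_c1];
   ==>
       R2 = I64[R1 + 8];

   _c1 is assigned once and used once, and nothing between the assignment
   and the use writes to R1, so the rhs is pushed forward into the use site
   and the original assignment is deleted.
-}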

countUses :: UserOfLocalRegs a => a -> UniqFM Int
countUses a = foldRegsUsed (\m r -> addToUFM m r (count m r + 1)) emptyUFM a
  where count m r = lookupWithDefaultUFM m (0::Int) r

cmmMiniInline :: [CmmBasicBlock] -> [CmmBasicBlock]
cmmMiniInline blocks = map do_inline blocks 
  where do_inline (BasicBlock id stmts)
          = BasicBlock id (cmmMiniInlineStmts (countUses blocks) stmts)

cmmMiniInlineStmts :: UniqFM Int -> [CmmStmt] -> [CmmStmt]
cmmMiniInlineStmts uses [] = []
cmmMiniInlineStmts uses (stmt@(CmmAssign (CmmLocal (LocalReg u _)) expr) : stmts)
        -- not used at all: just discard this assignment
  | Nothing <- lookupUFM uses u
  = cmmMiniInlineStmts uses stmts

        -- used once: try to inline at the use site
  | Just 1 <- lookupUFM uses u,
    Just stmts' <- lookForInline u expr stmts
  = 
#ifdef NCG_DEBUG
     trace ("nativeGen: inlining " ++ showSDoc (pprStmt stmt)) $
#endif
     cmmMiniInlineStmts uses stmts'

cmmMiniInlineStmts uses (stmt:stmts)
  = stmt : cmmMiniInlineStmts uses stmts

lookForInline u expr (stmt : rest)
  | Just 1 <- lookupUFM (countUses stmt) u, ok_to_inline
  = Just (inlineStmt u expr stmt : rest)

  | ok_to_skip
  = case lookForInline u expr rest of
           Nothing    -> Nothing
           Just stmts -> Just (stmt:stmts)

  | otherwise 
  = Nothing

  where
	-- we don't inline into CmmCall if the expression refers to global
	-- registers.  This is a HACK to avoid global registers clashing with
	-- C argument-passing registers; really, the back-end ought to be able
	-- to handle it properly, but currently neither PprC nor the NCG can
	-- do it.  See also CgForeignCall:load_args_into_temps.
    ok_to_inline = case stmt of
		     CmmCall{} -> hasNoGlobalRegs expr
		     _ -> True

   -- We can skip over assignments to other temporaries, because we
   -- know that expressions aren't side-effecting and temporaries are
   -- single-assignment.
    ok_to_skip = case stmt of
                 CmmNop -> True
                 CmmAssign (CmmLocal (LocalReg u' _)) rhs | u' /= u -> True
                 CmmAssign g@(CmmGlobal _) rhs -> not (g `regUsedIn` expr)
                 _other -> False


inlineStmt :: Unique -> CmmExpr -> CmmStmt -> CmmStmt
inlineStmt u a (CmmAssign r e) = CmmAssign r (inlineExpr u a e)
inlineStmt u a (CmmStore e1 e2) = CmmStore (inlineExpr u a e1) (inlineExpr u a e2)
inlineStmt u a (CmmCall target regs es srt ret)
   = CmmCall (infn target) regs es' srt ret
   where infn (CmmCallee fn cconv) = CmmCallee fn cconv
	 infn (CmmPrim p) = CmmPrim p
	 es' = [ (CmmHinted (inlineExpr u a e) hint) | (CmmHinted e hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
inlineStmt u a (CmmJump e d) = CmmJump (inlineExpr u a e) d
inlineStmt u a other_stmt = other_stmt

inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
inlineExpr u a e@(CmmReg (CmmLocal (LocalReg u' _)))
  | u == u' = a
  | otherwise = e
inlineExpr u a e@(CmmRegOff (CmmLocal (LocalReg u' rep)) off)
  | u == u' = CmmMachOp (MO_Add width) [a, CmmLit (CmmInt (fromIntegral off) width)]
  | otherwise = e
  where
    width = typeWidth rep
inlineExpr u a (CmmLoad e rep) = CmmLoad (inlineExpr u a e) rep
inlineExpr u a (CmmMachOp op es) = CmmMachOp op (map (inlineExpr u a) es)
inlineExpr u a other_expr = other_expr

-- -----------------------------------------------------------------------------
-- MachOp constant folder

-- Now, try to constant-fold the MachOps.  The arguments have already
-- been optimized and folded.

cmmMachOpFold
    :: MachOp	    	-- The operation from a CmmMachOp
    -> [CmmExpr]   	-- The optimized arguments
    -> CmmExpr

cmmMachOpFold op arg@[CmmLit (CmmInt x rep)]
  = case op of
      MO_S_Neg r -> CmmLit (CmmInt (-x) rep)
      MO_Not r   -> CmmLit (CmmInt (complement x) rep)

	-- these are interesting: we must first narrow to the 
	-- "from" type, in order to truncate to the correct size.
	-- The final narrow/widen to the destination type
	-- is implicit in the CmmLit.
      MO_SF_Conv from to -> CmmLit (CmmFloat (fromInteger x) to)
      MO_SS_Conv from to -> CmmLit (CmmInt (narrowS from x) to)
      MO_UU_Conv from to -> CmmLit (CmmInt (narrowU from x) to)

      _ -> panic "cmmMachOpFold: unknown unary op"
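-- An illustrative instance of the narrowing above: MO_SS_Conv W8 W32 applied
-- to the literal 200 gives narrowS W8 200 = -56, so the fold produces
-- CmmLit (CmmInt (-56) W32), i.e. the literal is sign-extended from its
-- 8-bit value before being (implicitly) widened to the destination width.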


-- Eliminate conversion NOPs
cmmMachOpFold (MO_SS_Conv rep1 rep2) [x] | rep1 == rep2 = x
cmmMachOpFold (MO_UU_Conv rep1 rep2) [x] | rep1 == rep2 = x

-- Eliminate nested conversions where possible
cmmMachOpFold conv_outer args@[CmmMachOp conv_inner [x]]
  | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
    Just (_,   rep3,signed2) <- isIntConversion conv_outer
  = case () of
	-- widen then narrow to the same size is a nop
      _ | rep1 < rep2 && rep1 == rep3 -> x
	-- Widen then narrow to different size: collapse to single conversion
	-- but remember to use the signedness from the widening, just in case
	-- the final conversion is a widen.
	| rep1 < rep2 && rep2 > rep3 ->
	    cmmMachOpFold (intconv signed1 rep1 rep3) [x]
	-- Nested widenings: collapse if the signedness is the same
	| rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
	    cmmMachOpFold (intconv signed1 rep1 rep3) [x]
	-- Nested narrowings: collapse
	| rep1 > rep2 && rep2 > rep3 ->
	    cmmMachOpFold (MO_UU_Conv rep1 rep3) [x]
	| otherwise ->
	    CmmMachOp conv_outer args
  where
	isIntConversion (MO_UU_Conv rep1 rep2) 
	  = Just (rep1,rep2,False)
	isIntConversion (MO_SS_Conv rep1 rep2)
	  = Just (rep1,rep2,True)
	isIntConversion _ = Nothing

	intconv True  = MO_SS_Conv
	intconv False = MO_UU_Conv
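
-- For example (an illustrative case): widening with MO_UU_Conv W8 W32 and
-- then narrowing with MO_UU_Conv W32 W16 hits the "widen then narrow to a
-- different size" case above, so the pair collapses to a single
-- MO_UU_Conv W8 W16, keeping the unsigned interpretation of the widening.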

-- ToDo: a narrow of a load can be collapsed into a narrow load, right?
-- but what if the architecture only supports word-sized loads, should
-- we do the transformation anyway?

cmmMachOpFold mop args@[CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
  = case mop of
	-- for comparisons: don't forget to narrow the arguments before
	-- comparing, since they might be out of range.
    	MO_Eq r   -> CmmLit (CmmInt (if x_u == y_u then 1 else 0) wordWidth)
    	MO_Ne r   -> CmmLit (CmmInt (if x_u /= y_u then 1 else 0) wordWidth)

    	MO_U_Gt r -> CmmLit (CmmInt (if x_u >  y_u then 1 else 0) wordWidth)
    	MO_U_Ge r -> CmmLit (CmmInt (if x_u >= y_u then 1 else 0) wordWidth)
    	MO_U_Lt r -> CmmLit (CmmInt (if x_u <  y_u then 1 else 0) wordWidth)
    	MO_U_Le r -> CmmLit (CmmInt (if x_u <= y_u then 1 else 0) wordWidth)

    	MO_S_Gt r -> CmmLit (CmmInt (if x_s >  y_s then 1 else 0) wordWidth) 
    	MO_S_Ge r -> CmmLit (CmmInt (if x_s >= y_s then 1 else 0) wordWidth)
    	MO_S_Lt r -> CmmLit (CmmInt (if x_s <  y_s then 1 else 0) wordWidth)
    	MO_S_Le r -> CmmLit (CmmInt (if x_s <= y_s then 1 else 0) wordWidth)

    	MO_Add r -> CmmLit (CmmInt (x + y) r)
    	MO_Sub r -> CmmLit (CmmInt (x - y) r)
    	MO_Mul r -> CmmLit (CmmInt (x * y) r)
    	MO_U_Quot r | y /= 0 -> CmmLit (CmmInt (x_u `quot` y_u) r)
    	MO_U_Rem  r | y /= 0 -> CmmLit (CmmInt (x_u `rem`  y_u) r)
    	MO_S_Quot r | y /= 0 -> CmmLit (CmmInt (x `quot` y) r)
    	MO_S_Rem  r | y /= 0 -> CmmLit (CmmInt (x `rem` y) r)

	MO_And   r -> CmmLit (CmmInt (x .&. y) r)
	MO_Or    r -> CmmLit (CmmInt (x .|. y) r)
	MO_Xor   r -> CmmLit (CmmInt (x `xor` y) r)

        MO_Shl   r -> CmmLit (CmmInt (x `shiftL` fromIntegral y) r)
        MO_U_Shr r -> CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
        MO_S_Shr r -> CmmLit (CmmInt (x `shiftR` fromIntegral y) r)

	other      -> CmmMachOp mop args

   where
	x_u = narrowU xrep x
	y_u = narrowU xrep y
	x_s = narrowS xrep x
	y_s = narrowS xrep y
	

-- When possible, shift the constants to the right-hand side, so that we
-- can match for strength reductions.  Note that the code generator will
-- also assume that constants have been shifted to the right when
-- possible.

cmmMachOpFold op [x@(CmmLit _), y]
   | not (isLit y) && isCommutableMachOp op 
   = cmmMachOpFold op [y, x]

-- Turn (a+b)+c into a+(b+c) where possible.  Because literals are
-- moved to the right, it is more likely that we will find
-- opportunities for constant folding when the expression is
-- right-associated.
--
-- ToDo: this appears to introduce a quadratic behaviour due to the
-- nested cmmMachOpFold.  Can we fix this?
--
-- Why do we check isLit arg1?  If arg1 is a lit, it means that arg2
-- is also a lit (otherwise arg1 would be on the right).  If we
-- put arg1 on the left of the rearranged expression, we'll get into a
-- loop:  (x1+x2)+x3 => x1+(x2+x3)  => (x2+x3)+x1 => x2+(x3+x1) ...
--
-- Also don't do it if arg1 is PicBaseReg, so that we don't separate the
-- PicBaseReg from the corresponding label (or label difference).
--
cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
   | mop1 == mop2 && isAssociativeMachOp mop1
     && not (isLit arg1) && not (isPicReg arg1)
   = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg2,arg3]]

-- Make a RegOff if we can
cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off + fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (- fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off - fromIntegral (narrowS rep n))

-- Fold label(+/-)offset into a CmmLit where possible

cmmMachOpFold (MO_Add _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Add _) [CmmLit (CmmInt i rep), CmmLit (CmmLabel lbl)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Sub _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (negate (narrowU rep i))))


-- Comparison of literal with widened operand: perform the comparison
-- at the smaller width, as long as the literal is within range.

-- We can't do the reverse trick, when the operand is narrowed:
-- narrowing throws away bits from the operand, there's no way to do
-- the same comparison at the larger size.

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- the PowerPC NCG has a TODO for I8/I16 comparisons, so don't try

cmmMachOpFold cmp [CmmMachOp conv [x], CmmLit (CmmInt i _)]
  |     -- if the operand is widened:
    Just (rep, signed, narrow_fn) <- maybe_conversion conv,
        -- and this is a comparison operation:
    Just narrow_cmp <- maybe_comparison cmp rep signed,
        -- and the literal fits in the smaller size:
    i == narrow_fn rep i
        -- then we can do the comparison at the smaller size
  = cmmMachOpFold narrow_cmp [x, CmmLit (CmmInt i rep)]
 where
    maybe_conversion (MO_UU_Conv from to)
        | to > from
        = Just (from, False, narrowU)
    maybe_conversion (MO_SS_Conv from to)
        | to > from
        = Just (from, True, narrowS)

        -- don't attempt to apply this optimisation when the source
        -- is a float; see #1916
    maybe_conversion _ = Nothing
    
        -- careful (#2080): if the original comparison was signed, but
        -- we were doing an unsigned widen, then we must do an
        -- unsigned comparison at the smaller size.
    maybe_comparison (MO_U_Gt _) rep _     = Just (MO_U_Gt rep)
    maybe_comparison (MO_U_Ge _) rep _     = Just (MO_U_Ge rep)
    maybe_comparison (MO_U_Lt _) rep _     = Just (MO_U_Lt rep)
    maybe_comparison (MO_U_Le _) rep _     = Just (MO_U_Le rep)
    maybe_comparison (MO_Eq   _) rep _     = Just (MO_Eq   rep)
    maybe_comparison (MO_S_Gt _) rep True  = Just (MO_S_Gt rep)
    maybe_comparison (MO_S_Ge _) rep True  = Just (MO_S_Ge rep)
    maybe_comparison (MO_S_Lt _) rep True  = Just (MO_S_Lt rep)
    maybe_comparison (MO_S_Le _) rep True  = Just (MO_S_Le rep)
    maybe_comparison (MO_S_Gt _) rep False = Just (MO_U_Gt rep)
    maybe_comparison (MO_S_Ge _) rep False = Just (MO_U_Ge rep)
    maybe_comparison (MO_S_Lt _) rep False = Just (MO_U_Lt rep)
    maybe_comparison (MO_S_Le _) rep False = Just (MO_U_Le rep)
    maybe_comparison _ _ _ = Nothing
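
-- An illustrative example: a signed comparison of an unsigned widen, say
--     MO_S_Gt W32 [ MO_UU_Conv W8 W32 [x], CmmLit (CmmInt 100 W32) ]
-- has a literal (100) that is unchanged by narrowU W8, so, per #2080, the
-- fold rewrites it to an *unsigned* comparison at the smaller width:
--     MO_U_Gt W8 [ x, CmmLit (CmmInt 100 W8) ]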

#endif

-- We can often do something with constants of 0 and 1 ...

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 0 _))]
  = case mop of
    	MO_Add   r -> x
    	MO_Sub   r -> x
    	MO_Mul   r -> y
    	MO_And   r -> y
    	MO_Or    r -> x
    	MO_Xor   r -> x
    	MO_Shl   r -> x
    	MO_S_Shr r -> x
    	MO_U_Shr r -> x
        MO_Ne    r | isComparisonExpr x -> x
	MO_Eq    r | Just x' <- maybeInvertCmmExpr x -> x'
	MO_U_Gt  r | isComparisonExpr x -> x
	MO_S_Gt  r | isComparisonExpr x -> x
	MO_U_Lt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
	MO_S_Lt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
	MO_U_Ge  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
	MO_S_Ge  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
	MO_U_Le  r | Just x' <- maybeInvertCmmExpr x -> x'
	MO_S_Le  r | Just x' <- maybeInvertCmmExpr x -> x'
    	other    -> CmmMachOp mop args

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 1 rep))]
  = case mop of
    	MO_Mul    r -> x
    	MO_S_Quot r -> x
    	MO_U_Quot r -> x
    	MO_S_Rem  r -> CmmLit (CmmInt 0 rep)
    	MO_U_Rem  r -> CmmLit (CmmInt 0 rep)
        MO_Ne    r | Just x' <- maybeInvertCmmExpr x -> x'
	MO_Eq    r | isComparisonExpr x -> x
	MO_U_Lt  r | Just x' <- maybeInvertCmmExpr x -> x'
	MO_S_Lt  r | Just x' <- maybeInvertCmmExpr x -> x'
	MO_U_Gt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
	MO_S_Gt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
	MO_U_Le  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
	MO_S_Le  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
	MO_U_Ge  r | isComparisonExpr x -> x
	MO_S_Ge  r | isComparisonExpr x -> x
    	other       -> CmmMachOp mop args

-- Now look for multiplication/division by powers of 2 (integers).

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt n _))]
  = case mop of
    	MO_Mul rep
	   | Just p <- exactLog2 n ->
                 CmmMachOp (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
    	MO_U_Quot rep
	   | Just p <- exactLog2 n ->
                 CmmMachOp (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)]
    	MO_S_Quot rep
	   | Just p <- exactLog2 n, 
	     CmmReg _ <- x ->	-- We duplicate x below, hence require
				-- it is a reg.  FIXME: remove this restriction.
		-- shift right is not the same as quot, because it rounds
		-- to minus infinity, whereas quot rounds toward zero.
		-- To fix this up, we add one less than the divisor to the
		-- dividend if it is a negative number.
		--
		-- to avoid a test/jump, we use the following sequence:
		-- 	x1 = x >> word_size-1  (all 1s if -ve, all 0s if +ve)
		--      x2 = x1 & (divisor-1)
		--      result = (x+x2) >>= log2(divisor)
		-- this could be done a bit more simply using conditional moves,
		-- but we're processor independent here.
		--
		-- we optimise the divide by 2 case slightly, generating
		--      x1 = x >> word_size-1  (unsigned)
		--      return = (x + x1) >>= log2(divisor)
		let 
		    bits = fromIntegral (widthInBits rep) - 1
		    shr = if p == 1 then MO_U_Shr rep else MO_S_Shr rep
		    x1 = CmmMachOp shr [x, CmmLit (CmmInt bits rep)]
		    x2 = if p == 1 then x1 else
			 CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
		    x3 = CmmMachOp (MO_Add rep) [x, x2]
		in
                CmmMachOp (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]
    	other
           -> unchanged
    where
       unchanged = CmmMachOp mop args
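
-- A worked example of the signed-division fix-up above: for x = -7 and a
-- divisor of 4 (p = 2), a plain arithmetic shift gives (-7) >> 2 = -2,
-- whereas (-7) `quot` 4 = -1.  Adding divisor-1 = 3 to the negative dividend
-- first gives (-7 + 3) >> 2 = (-4) >> 2 = -1, matching quot; for non-negative
-- dividends the masked x2 is 0, so nothing changes.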

-- Anything else is just too hard.

cmmMachOpFold mop args = CmmMachOp mop args

-- -----------------------------------------------------------------------------
-- exactLog2

-- This algorithm for determining the $\log_2$ of exact powers of 2 comes
-- from GCC.  It requires bit manipulation primitives, and we use GHC
-- extensions.  Tough.
-- 
-- Used to be in MachInstrs --SDM.
-- ToDo: remove use of unboxery --SDM.

-- Unboxery removed in favor of FastInt; but is the function supposed to fail
-- on inputs >= 2147483648, or was that just an implementation artifact?
-- And is this speed-critical, or can we just use Integer operations
-- (including Data.Bits)?
--  --Isaac Dupree

exactLog2 :: Integer -> Maybe Integer
exactLog2 x_
  = if (x_ <= 0 || x_ >= 2147483648) then
       Nothing
    else
       case iUnbox (fromInteger x_) of { x ->
       if (x `bitAndFastInt` negateFastInt x) /=# x then
	  Nothing
       else
	  Just (toInteger (iBox (pow2 x)))
       }
  where
    pow2 x | x ==# _ILIT(1) = _ILIT(0)
           | otherwise = _ILIT(1) +# pow2 (x `shiftR_FastInt` _ILIT(1))
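
-- A minimal sketch of the Integer/Data.Bits variant asked about above.  It
-- keeps the (possibly accidental) upper bound of the original.  The name
-- exactLog2Integer is made up here and nothing in this module uses it.
exactLog2Integer :: Integer -> Maybe Integer
exactLog2Integer x
  | x <= 0 || x >= 2147483648 = Nothing
  | x .&. negate x /= x       = Nothing         -- not a power of two
  | otherwise                 = Just (go x 0)
  where
    go 1 acc = acc
    go n acc = go (n `shiftR` 1) (acc + 1)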


-- -----------------------------------------------------------------------------
-- Loopify for C

{-
 This is a simple pass that replaces tail-recursive functions like this:

   fac() {
     ...
     jump fac();
   }

 with this:

  fac() {
   L:
     ...
     goto L;
  }

  the latter generates better C code, because the C compiler treats it
  like a loop, and brings full loop optimisation to bear.

  In my measurements this makes little or no difference to anything
  except factorial, but what the hell.
-}

cmmLoopifyForC :: RawCmmTop -> RawCmmTop
cmmLoopifyForC p@(CmmProc info entry_lbl []
                 (ListGraph blocks@(BasicBlock top_id _ : _)))
  | null info = p  -- only if there's an info table, ignore case alts
  | otherwise =  
--  pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
  CmmProc info entry_lbl [] (ListGraph blocks')
  where blocks' = [ BasicBlock id (map do_stmt stmts)
		  | BasicBlock id stmts <- blocks ]

        do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
		= CmmBranch top_id
	do_stmt stmt = stmt

	jump_lbl | tablesNextToCode = entryLblToInfoLbl entry_lbl
		 | otherwise        = entry_lbl

cmmLoopifyForC top = top

-- -----------------------------------------------------------------------------
-- Utils

isLit (CmmLit _) = True
isLit _          = False

isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr (CmmMachOp op _) = isComparisonMachOp op
isComparisonExpr _other 	    = False

isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True
isPicReg _ = False