MachCodeGen.hs 156 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
-----------------------------------------------------------------------------
--
-- Generating machine code (instruction selection)
--
-- (c) The University of Glasgow 1996-2004
--
-----------------------------------------------------------------------------

-- This is a big module, but, if you pay attention to
-- (a) the sectioning, (b) the type signatures, and
-- (c) the #if blah_TARGET_ARCH} things, the
-- structure should not be too overwhelming.

14
{-# OPTIONS -w #-}
15
16
17
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
Ian Lynagh's avatar
Ian Lynagh committed
18
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
19
20
-- for details

21
22
23
24
module MachCodeGen ( cmmTopCodeGen, InstrBlock ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
25
#include "MachDeps.h"
26
27
28
29
30

-- NCG stuff:
import MachInstrs
import MachRegs
import NCGMonad
31
import PositionIndependentCode
32
import RegAllocInfo ( mkBranchInstr )
33
34
35
36
37
38

-- Our intermediate code:
import PprCmm		( pprExpr )
import Cmm
import MachOp
import CLabel
39
import ClosureInfo	( C_SRT(..) )
40
41

-- The rest:
42
import StaticFlags	( opt_PIC )
43
44
45
46
47
48
import ForeignCall	( CCallConv(..) )
import OrdList
import Pretty
import Outputable
import FastString
import FastTypes	( isFastTrue )
49
import Constants	( wORD_SIZE )
50
51

#ifdef DEBUG
52
import Outputable	( assertPanic )
Simon Marlow's avatar
Simon Marlow committed
53
import Debug.Trace	( trace )
54
#endif
55
import Debug.Trace	( trace )
56
57

import Control.Monad	( mapAndUnzipM )
Simon Marlow's avatar
Simon Marlow committed
58
59
60
import Data.Maybe	( fromJust )
import Data.Bits
import Data.Word
61
import Data.Int
62
63
64
65
66
67
68
69
70
71
72

-- -----------------------------------------------------------------------------
-- Top-level of the instruction selector

-- | 'InstrBlock's are the insn sequences generated by the insn selectors.
-- They are really trees of insns to facilitate fast appending, where a
-- left-to-right traversal (pre-order?) yields the insns in the correct
-- order.

type InstrBlock = OrdList Instr

73
-- | Code-generate a top-level Cmm item.  A procedure is translated
-- block-by-block; any out-of-line data produced during instruction
-- selection is appended after the proc.  On PIC platforms we may also
-- need to plant code that initialises the PIC base register.
cmmTopCodeGen :: RawCmmTop -> NatM [NatCmmTop]
cmmTopCodeGen (CmmProc info lab params (ListGraph blocks)) = do
  (nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
  picBaseMb <- getPicBaseMaybeNat
  let proc = CmmProc info lab params (ListGraph $ concat nat_blocks)
      tops = proc : concat statics
  case picBaseMb of
      Just picBase -> initializePicBase picBase tops
      Nothing -> return tops

cmmTopCodeGen (CmmData sec dat) = do
  return [CmmData sec dat]  -- no translation, we just use CmmStatic

-- | Generate native code for a single Cmm basic block.  Instruction
-- selection can introduce new basic-block boundaries (marked by the
-- NEWBLOCK pseudo-instruction) and out-of-line data (LDATA), so the
-- generated stream is re-partitioned here into proper basic blocks
-- plus a list of data sections.
basicBlockCodeGen :: CmmBasicBlock -> NatM ([NatBasicBlock],[NatCmmTop])
basicBlockCodeGen (BasicBlock id stmts) = do
  instrs <- stmtsToInstrs stmts
  let (first_instrs, rest_blocks, datas) = foldrOL part ([],[],[]) instrs

      -- NEWBLOCK ends the current accumulation and opens a fresh block
      part (NEWBLOCK bid) (cur, blocks, ds)
        = ([], BasicBlock bid cur : blocks, ds)
      -- LDATA is hoisted out as a separate data section
      part (LDATA sec dat) (cur, blocks, ds)
        = (cur, blocks, CmmData sec dat : ds)
      part instr (cur, blocks, ds)
        = (instr : cur, blocks, ds)
  -- in
  return (BasicBlock id first_instrs : rest_blocks, datas)

-- | Select instructions for a statement list, concatenating the
-- per-statement instruction trees in order.
stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
stmtsToInstrs stmts = do
    instr_lists <- mapM stmtToInstrs stmts
    return (concatOL instr_lists)

-- | Select instructions for a single Cmm statement, dispatching on the
-- statement kind.  On 32-bit targets, I64 assignments and stores are
-- routed to the special register-pair code generators.
stmtToInstrs :: CmmStmt -> NatM InstrBlock
stmtToInstrs stmt = case stmt of
    CmmNop         -> return nilOL
    CmmComment s   -> return (unitOL (COMMENT s))

    CmmAssign reg src
      | isFloatingRep kind -> assignReg_FltCode kind reg src
#if WORD_SIZE_IN_BITS==32
      | kind == I64        -> assignReg_I64Code      reg src
#endif
      | otherwise          -> assignReg_IntCode kind reg src
        where kind = cmmRegRep reg

    CmmStore addr src
      | isFloatingRep kind -> assignMem_FltCode kind addr src
#if WORD_SIZE_IN_BITS==32
      | kind == I64      -> assignMem_I64Code      addr src
#endif
      | otherwise        -> assignMem_IntCode kind addr src
        where kind = cmmExprRep src

    CmmCall target result_regs args _ _
       -> genCCall target result_regs args

    CmmBranch id          -> genBranch id
    CmmCondBranch arg id  -> genCondJump id arg
    CmmSwitch arg ids     -> genSwitch arg ids
    CmmJump arg params    -> genJump arg

-- -----------------------------------------------------------------------------
-- General things for putting together code sequences

-- Expand a CmmRegOff node into an explicit machine-level addition of
-- the register and the literal offset.  ToDo: should we do it this way
-- around, or convert CmmExprs into CmmRegOff?
mangleIndexTree :: CmmExpr -> CmmExpr
mangleIndexTree (CmmRegOff reg off)
  = let rep = cmmRegRep reg
    in  CmmMachOp (MO_Add rep)
                  [CmmReg reg, CmmLit (CmmInt (fromIntegral off) rep)]

-- -----------------------------------------------------------------------------
--  Code gen for 64-bit arithmetic on 32-bit platforms

{-
Simple support for generating 64-bit code (ie, 64 bit values and 64
bit assignments) on 32-bit platforms.  Unlike the main code generator
we merely shoot for generating working code as simply as possible, and
pay little attention to code quality.  Specifically, there is no
attempt to deal cleverly with the fixed-vs-floating register
distinction; all values are generated into (pairs of) floating
registers, even if this would mean some redundant reg-reg moves as a
result.  Only one of the VRegUniques is returned, since it will be
of the VRegUniqueLo form, and the upper-half VReg can be determined
by applying getHiVRegFromLo to it.
-}

-- | Result of instruction selection for a 64-bit value on a 32-bit
-- target: the value lives in a *pair* of 32-bit virtual registers, of
-- which only the low half is carried explicitly.
data ChildCode64 	-- a.k.a "Register64"
   = ChildCode64 
        InstrBlock 	-- code that computes the 64-bit value
        Reg	 	-- the lower 32-bit temporary which contains the
			-- result; use getHiVRegFromLo to find the other
			-- VRegUnique.  Rules of this simplified insn
			-- selection game are therefore that the returned
			-- Reg may be modified

174
#if WORD_SIZE_IN_BITS==32

-- Store a 64-bit value to memory / assign it to a local register.
-- Only needed on 32-bit targets, where an I64 occupies a register pair.
assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
assignReg_I64Code :: CmmReg  -> CmmExpr -> NatM InstrBlock

#endif

#ifndef x86_64_TARGET_ARCH
iselExpr64        :: CmmExpr -> NatM ChildCode64
#endif
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199

-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

#if i386_TARGET_ARCH

-- Store a 64-bit value to memory on i386: two 32-bit MOVs through the
-- same base address (little-endian: low word first).
assignMem_I64Code addrTree valueTree = do
  Amode addr addr_code <- getAmode addrTree
  ChildCode64 val_code r_lo <- iselExpr64 valueTree
  let r_hi = getHiVRegFromLo r_lo
      -- low word at addr, high word at addr+4
      -- NOTE(review): fromJust assumes addr always admits a +4 offset
      store_lo = MOV I32 (OpReg r_lo) (OpAddr addr)
      store_hi = MOV I32 (OpReg r_hi) (OpAddr (fromJust (addrOffset addr 4)))
  return (val_code `appOL` addr_code `snocOL` store_lo `snocOL` store_hi)


200
-- Assign a 64-bit value to a local register pair on i386: move the
-- source pair into the destination pair with two 32-bit MOVs.
assignReg_I64Code (CmmLocal (LocalReg u_dst pk _)) valueTree = do
   ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
   let 
         r_dst_lo = mkVReg u_dst I32
         r_dst_hi = getHiVRegFromLo r_dst_lo
         r_src_hi = getHiVRegFromLo r_src_lo
         mov_lo = MOV I32 (OpReg r_src_lo) (OpReg r_dst_lo)
         mov_hi = MOV I32 (OpReg r_src_hi) (OpReg r_dst_hi)
   -- in
   return (
        vcode `snocOL` mov_lo `snocOL` mov_hi
     )

-- Only local registers can hold I64 on a 32-bit target.
assignReg_I64Code lvalue valueTree
   = panic "assignReg_I64Code(i386): invalid lvalue"

------------

-- 64-bit literal: load the two 32-bit halves into a fresh register pair.
iselExpr64 (CmmLit (CmmInt i _)) = do
  (rlo,rhi) <- getNewRegPairNat I32
  let
        -- low word: truncate the literal to 32 bits
        r = fromIntegral (fromIntegral i :: Word32)
        -- high word: shift the unbounded Integer FIRST, then truncate.
        -- (Truncating to Word32 before shifting by 32 would always
        -- yield 0, losing the high word of the literal.)
        q = fromIntegral (fromIntegral (i `shiftR` 32) :: Word32)
        code = toOL [
                MOV I32 (OpImm (ImmInteger r)) (OpReg rlo),
                MOV I32 (OpImm (ImmInteger q)) (OpReg rhi)
                ]
  -- in
  return (ChildCode64 code rlo)

-- 64-bit load: two 32-bit loads, little-endian (low word at addr).
iselExpr64 (CmmLoad addrTree I64) = do
   Amode addr addr_code <- getAmode addrTree
   (rlo,rhi) <- getNewRegPairNat I32
   let 
        mov_lo = MOV I32 (OpAddr addr) (OpReg rlo)
        mov_hi = MOV I32 (OpAddr (fromJust (addrOffset addr 4))) (OpReg rhi)
   -- in
   return (
            ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi) 
                        rlo
     )

-- A local I64 register already lives in a vreg pair; no code needed.
iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64 _)))
   = return (ChildCode64 nilOL (mkVReg vu I32))
         
-- we handle addition, but rather badly
-- 64-bit add of a literal: ADD/ADC with the two immediate halves.
iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)]) = do
   ChildCode64 code1 r1lo <- iselExpr64 e1
   (rlo,rhi) <- getNewRegPairNat I32
   let
        r = fromIntegral (fromIntegral i :: Word32)
        -- shift the Integer before truncating (see CmmInt case above)
        q = fromIntegral (fromIntegral (i `shiftR` 32) :: Word32)
        r1hi = getHiVRegFromLo r1lo
        code =  code1 `appOL`
                toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
                       ADD I32 (OpImm (ImmInteger r)) (OpReg rlo),
                       MOV I32 (OpReg r1hi) (OpReg rhi),
                       ADC I32 (OpImm (ImmInteger q)) (OpReg rhi) ]
   -- in
   return (ChildCode64 code rlo)

-- General 64-bit add: low words with ADD, high words with ADC (carry).
iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
   ChildCode64 code1 r1lo <- iselExpr64 e1
   ChildCode64 code2 r2lo <- iselExpr64 e2
   (rlo,rhi) <- getNewRegPairNat I32
   let
        r1hi = getHiVRegFromLo r1lo
        r2hi = getHiVRegFromLo r2lo
        code =  code1 `appOL`
                code2 `appOL`
                toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
                       ADD I32 (OpReg r2lo) (OpReg rlo),
                       MOV I32 (OpReg r1hi) (OpReg rhi),
                       ADC I32 (OpReg r2hi) (OpReg rhi) ]
   -- in
   return (ChildCode64 code rlo)

-- Zero-extend to 64 bits: compute the value into the low register and
-- clear the high register.
iselExpr64 (CmmMachOp (MO_U_Conv _ I64) [expr]) = do
     fn <- getAnyReg expr
     r_dst_lo <-  getNewRegNat I32
     let r_dst_hi = getHiVRegFromLo r_dst_lo
         code = fn r_dst_lo
     return (
             ChildCode64 (code `snocOL` 
                          MOV I32 (OpImm (ImmInt 0)) (OpReg r_dst_hi))
                          r_dst_lo
            )

iselExpr64 expr
   = pprPanic "iselExpr64(i386)" (ppr expr)

#endif /* i386_TARGET_ARCH */

-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

#if sparc_TARGET_ARCH

297
298
299
300
301
-- Store a 64-bit value to memory on sparc: two 32-bit STs through a
-- register holding the address (big-endian: high word first).
assignMem_I64Code addrTree valueTree = do
     Amode addr addr_code <- getAmode addrTree
     ChildCode64 vcode rlo <- iselExpr64 valueTree  
     (src, code) <- getSomeReg addrTree
     let 
         rhi = getHiVRegFromLo rlo
         -- Big-endian store
         -- NOTE(review): the Amode result above is unused; the address
         -- is recomputed into `src` via getSomeReg -- confirm intended
         mov_hi = ST I32 rhi (AddrRegImm src (ImmInt 0))
         mov_lo = ST I32 rlo (AddrRegImm src (ImmInt 4))
     return (vcode `appOL` code `snocOL` mov_hi `snocOL` mov_lo)
307

308
309
-- Assign a 64-bit value to a local register pair on sparc: an OR with
-- %g0 acts as a register-to-register move.
assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
     ChildCode64 vcode r_src_lo <- iselExpr64 valueTree    
     let 
         r_dst_lo = mkVReg u_dst pk
         r_dst_hi = getHiVRegFromLo r_dst_lo
         r_src_hi = getHiVRegFromLo r_src_lo
         mov_lo = mkMOV r_src_lo r_dst_lo
         mov_hi = mkMOV r_src_hi r_dst_hi
         mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
     return (vcode `snocOL` mov_hi `snocOL` mov_lo)

-- Only local registers can hold I64 on a 32-bit target.
assignReg_I64Code lvalue valueTree
   = panic "assignReg_I64Code(sparc): invalid lvalue"
320
321
322
323


-- Don't delete this -- it's very handy for debugging.
--iselExpr64 expr 
--   | trace ("iselExpr64: " ++ showSDoc (ppr expr)) False
--   = panic "iselExpr64(???)"

-- 64-bit load: two 32-bit LDs, big-endian (high word at addr).
-- NOTE(review): the pattern match assumes getAmode yields an
-- AddrRegReg here, and only r1 is used -- confirm r2 is always %g0.
iselExpr64 (CmmLoad addrTree I64) = do
     Amode (AddrRegReg r1 r2) addr_code <- getAmode addrTree
     rlo <- getNewRegNat I32
     let rhi = getHiVRegFromLo rlo
         mov_hi = LD I32 (AddrRegImm r1 (ImmInt 0)) rhi
         mov_lo = LD I32 (AddrRegImm r1 (ImmInt 4)) rlo
     return (
            ChildCode64 (addr_code `snocOL` mov_hi `snocOL` mov_lo) 
                         rlo
          )

-- A local I64 register: copy both halves into a fresh pair
-- (OR with %g0 is the sparc register-to-register move).
iselExpr64 (CmmReg (CmmLocal (LocalReg uq I64))) = do
     r_dst_lo <-  getNewRegNat I32
     let r_dst_hi = getHiVRegFromLo r_dst_lo
         r_src_lo = mkVReg uq I32
         r_src_hi = getHiVRegFromLo r_src_lo
         mov_lo = mkMOV r_src_lo r_dst_lo
         mov_hi = mkMOV r_src_hi r_dst_hi
         mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
     return (
            ChildCode64 (toOL [mov_hi, mov_lo]) r_dst_lo
         )

iselExpr64 expr
   = pprPanic "iselExpr64(sparc)" (ppr expr)
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380

#endif /* sparc_TARGET_ARCH */

-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

#if powerpc_TARGET_ARCH

-- | Addressing modes for the two halves of a 64-bit memory operand
-- (big-endian: high word at the base address, low word at +4), plus
-- the code needed to set them up.  Falls back to materialising the
-- address in a register when the amode cannot simply be offset.
getI64Amodes :: CmmExpr -> NatM (AddrMode, AddrMode, InstrBlock)
getI64Amodes addrTree = do
    Amode hi_addr addr_code <- getAmode addrTree
    case addrOffset hi_addr 4 of
        Just lo_addr ->
            return (hi_addr, lo_addr, addr_code)
        Nothing -> do
            (base, code) <- getSomeReg addrTree
            return ( AddrRegImm base (ImmInt 0)
                   , AddrRegImm base (ImmInt 4)
                   , code )

-- Store a 64-bit value to memory on powerpc: two 32-bit STs,
-- big-endian (high word at the lower address).
assignMem_I64Code addrTree valueTree = do
        (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
        ChildCode64 val_code rlo <- iselExpr64 valueTree
        let rhi = getHiVRegFromLo rlo
            -- Big-endian store
            st_hi = ST I32 rhi hi_addr
            st_lo = ST I32 rlo lo_addr
        return (val_code `appOL` addr_code `snocOL` st_lo `snocOL` st_hi)

381
-- Assign a 64-bit value to a local register pair on powerpc: two MR
-- (move-register) instructions.
assignReg_I64Code (CmmLocal (LocalReg u_dst pk _)) valueTree = do
   ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
   let 
         r_dst_lo = mkVReg u_dst I32
         r_dst_hi = getHiVRegFromLo r_dst_lo
         r_src_hi = getHiVRegFromLo r_src_lo
         mov_lo = MR r_dst_lo r_src_lo
         mov_hi = MR r_dst_hi r_src_hi
   -- in
   return (
        vcode `snocOL` mov_lo `snocOL` mov_hi
     )

-- Only local registers can hold I64 on a 32-bit target.
assignReg_I64Code lvalue valueTree
   = panic "assignReg_I64Code(powerpc): invalid lvalue"


-- Don't delete this -- it's very handy for debugging.
--iselExpr64 expr 
--   | trace ("iselExpr64: " ++ showSDoc (pprCmmExpr expr)) False
--   = panic "iselExpr64(???)"

-- 64-bit load: two 32-bit LDs, big-endian (high word first).
iselExpr64 (CmmLoad addrTree I64) = do
    (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
    (rlo, rhi) <- getNewRegPairNat I32
    let mov_hi = LD I32 rhi hi_addr
        mov_lo = LD I32 rlo lo_addr
    return $ ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi) 
                         rlo

-- A local I64 register already lives in a vreg pair; no code needed.
iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64 _)))
   = return (ChildCode64 nilOL (mkVReg vu I32))

-- 64-bit literal: build each 32-bit half from two 16-bit immediates
-- with LIS (load-immediate-shifted) followed by OR.
iselExpr64 (CmmLit (CmmInt i _)) = do
  (rlo,rhi) <- getNewRegPairNat I32
  let
        -- Shift the unbounded Integer BEFORE truncating to 16 bits;
        -- truncating first would make every shifted half come out wrong.
        half0 = fromIntegral (fromIntegral i :: Word16)
        half1 = fromIntegral (fromIntegral (i `shiftR` 16) :: Word16)
        half2 = fromIntegral (fromIntegral (i `shiftR` 32) :: Word16)
        half3 = fromIntegral (fromIntegral (i `shiftR` 48) :: Word16)
        
        code = toOL [
                LIS rlo (ImmInt half1),
                OR rlo rlo (RIImm $ ImmInt half0),
                LIS rhi (ImmInt half3),
                -- half2 belongs to the HIGH word: OR it into rhi,
                -- not rlo
                OR rhi rhi (RIImm $ ImmInt half2)
                ]
  -- in
  return (ChildCode64 code rlo)

-- 64-bit add: low words with ADDC (sets carry), high words with ADDE
-- (consumes carry).
iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
   ChildCode64 code1 r1lo <- iselExpr64 e1
   ChildCode64 code2 r2lo <- iselExpr64 e2
   (rlo,rhi) <- getNewRegPairNat I32
   let
        r1hi = getHiVRegFromLo r1lo
        r2hi = getHiVRegFromLo r2lo
        code =  code1 `appOL`
                code2 `appOL`
                toOL [ ADDC rlo r1lo r2lo,
                       ADDE rhi r1hi r2hi ]
   -- in
   return (ChildCode64 code rlo)

-- Zero-extend a 32-bit value: copy into the low register and clear
-- the high register.
iselExpr64 (CmmMachOp (MO_U_Conv I32 I64) [expr]) = do
    (expr_reg,expr_code) <- getSomeReg expr
    (rlo, rhi) <- getNewRegPairNat I32
    let mov_hi = LI rhi (ImmInt 0)
        mov_lo = MR rlo expr_reg
    return $ ChildCode64 (expr_code `snocOL` mov_lo `snocOL` mov_hi)
                         rlo

iselExpr64 expr
   = pprPanic "iselExpr64(powerpc)" (ppr expr)

#endif /* powerpc_TARGET_ARCH */


-- -----------------------------------------------------------------------------
-- The 'Register' type

-- 'Register's passed up the tree.  If the stix code forces the register
-- to live in a pre-decided machine register, it comes out as @Fixed@;
-- otherwise, it comes out as @Any@, and the parent can decide which
-- register to put it in.

-- | A value passed up the expression tree during instruction
-- selection.  @Fixed@: the value is pinned in the given register.
-- @Any@: the parent chooses the destination register and the function
-- produces the code targeting it.
data Register
  = Fixed   MachRep Reg InstrBlock
  | Any	    MachRep (Reg -> InstrBlock)

-- | Replace the MachRep carried by a Register, leaving the register /
-- code-generation part untouched.
swizzleRegisterRep :: Register -> MachRep -> Register
swizzleRegisterRep r rep = case r of
    Fixed _ reg code -> Fixed rep reg code
    Any _ codefn     -> Any rep codefn


475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
-- -----------------------------------------------------------------------------
-- Utils based on getRegister, below

-- The dual to getAnyReg: compute an expression into a register, but
-- we don't mind which one it is.  A Fixed result is used where it
-- already sits; an Any result is materialised into a fresh vreg.
getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
getSomeReg expr = do
  r <- getRegister expr
  case r of
    Fixed _ reg code ->
        return (reg, code)
    Any rep mk_code -> do
        tmp <- getNewRegNat rep
        return (tmp, mk_code tmp)

490
491
492
493
494
-- -----------------------------------------------------------------------------
-- Grab the Reg for a CmmReg

-- | The machine register for a CmmReg: a virtual register for locals,
-- the mapped real register for globals.
getRegisterReg :: CmmReg -> Reg

getRegisterReg (CmmLocal (LocalReg u pk _))
  = mkVReg u pk

getRegisterReg (CmmGlobal mid)
  = case get_GlobalReg_reg_or_addr mid of
       Left (RealReg rrno) -> RealReg rrno
       _other -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
          -- By this stage, the only MagicIds remaining should be the
          -- ones which map to a real machine register on this
          -- platform.  Hence ...


-- -----------------------------------------------------------------------------
-- Generate code to get a subtree into a Register

-- Don't delete this -- it's very handy for debugging.
--getRegister expr 
--   | trace ("getRegister: " ++ showSDoc (pprCmmExpr expr)) False
--   = panic "getRegister(???)"

getRegister :: CmmExpr -> NatM Register

#if !x86_64_TARGET_ARCH
    -- on x86_64, we have %rip for PicBaseReg, but it's not a full-featured
    -- register, it can only be used for rip-relative addressing.
getRegister (CmmReg (CmmGlobal PicBaseReg))
  = do
      reg <- getPicBaseNat wordRep
      return (Fixed wordRep reg nilOL)
#endif

-- A plain register reference: no code, the value is already there.
getRegister (CmmReg reg) 
  = return (Fixed (cmmRegRep reg) (getRegisterReg reg) nilOL)

-- Register+offset: expand to an explicit add and retry.
getRegister tree@(CmmRegOff _ _) 
  = getRegister (mangleIndexTree tree)

532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556

#if WORD_SIZE_IN_BITS==32
    -- for 32-bit architectures, support some 64 -> 32 bit conversions:
    -- TO_W_(x), TO_W_(x >> 32)

-- Narrowing the top half of a 64-bit value: select the pair and hand
-- back its high register.
getRegister (CmmMachOp (MO_U_Conv I64 I32)
             [CmmMachOp (MO_U_Shr I64) [x,CmmLit (CmmInt 32 _)]]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return (Fixed I32 (getHiVRegFromLo rlo) code)

getRegister (CmmMachOp (MO_S_Conv I64 I32)
             [CmmMachOp (MO_U_Shr I64) [x,CmmLit (CmmInt 32 _)]]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return (Fixed I32 (getHiVRegFromLo rlo) code)

-- Plain narrowing keeps only the low register of the pair.
getRegister (CmmMachOp (MO_U_Conv I64 I32) [x]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return (Fixed I32 rlo code)

getRegister (CmmMachOp (MO_S_Conv I64 I32) [x]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return (Fixed I32 rlo code)

#endif

557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
-- end of machine-"independent" bit; here we go on the rest...

#if alpha_TARGET_ARCH

getRegister (StDouble d)
  = getBlockIdNat 	    	    `thenNat` \ lbl ->
    getNewRegNat PtrRep    	    `thenNat` \ tmp ->
    let code dst = mkSeqInstrs [
	    LDATA RoDataSegment lbl [
		    DATA TF [ImmLab (rational d)]
		],
	    LDA tmp (AddrImm (ImmCLbl lbl)),
	    LD TF dst (AddrReg tmp)]
    in
    	return (Any F64 code)

getRegister (StPrim primop [x]) -- unary PrimOps
  = case primop of
      IntNegOp -> trivialUCode (NEG Q False) x

      NotOp    -> trivialUCode NOT x

      FloatNegOp  -> trivialUFCode FloatRep  (FNEG TF) x
      DoubleNegOp -> trivialUFCode F64 (FNEG TF) x

      OrdOp -> coerceIntCode IntRep x
      ChrOp -> chrCode x

      Float2IntOp  -> coerceFP2Int    x
      Int2FloatOp  -> coerceInt2FP pr x
      Double2IntOp -> coerceFP2Int    x
      Int2DoubleOp -> coerceInt2FP pr x

      Double2FloatOp -> coerceFltCode x
      Float2DoubleOp -> coerceFltCode x

      other_op -> getRegister (StCall fn CCallConv F64 [x])
	where
	  fn = case other_op of
		 FloatExpOp    -> FSLIT("exp")
		 FloatLogOp    -> FSLIT("log")
		 FloatSqrtOp   -> FSLIT("sqrt")
		 FloatSinOp    -> FSLIT("sin")
		 FloatCosOp    -> FSLIT("cos")
		 FloatTanOp    -> FSLIT("tan")
		 FloatAsinOp   -> FSLIT("asin")
		 FloatAcosOp   -> FSLIT("acos")
		 FloatAtanOp   -> FSLIT("atan")
		 FloatSinhOp   -> FSLIT("sinh")
		 FloatCoshOp   -> FSLIT("cosh")
		 FloatTanhOp   -> FSLIT("tanh")
		 DoubleExpOp   -> FSLIT("exp")
		 DoubleLogOp   -> FSLIT("log")
		 DoubleSqrtOp  -> FSLIT("sqrt")
		 DoubleSinOp   -> FSLIT("sin")
		 DoubleCosOp   -> FSLIT("cos")
		 DoubleTanOp   -> FSLIT("tan")
		 DoubleAsinOp  -> FSLIT("asin")
		 DoubleAcosOp  -> FSLIT("acos")
		 DoubleAtanOp  -> FSLIT("atan")
		 DoubleSinhOp  -> FSLIT("sinh")
		 DoubleCoshOp  -> FSLIT("cosh")
		 DoubleTanhOp  -> FSLIT("tanh")
  where
    pr = panic "MachCode.getRegister: no primrep needed for Alpha"

getRegister (StPrim primop [x, y]) -- dyadic PrimOps
  = case primop of
      CharGtOp -> trivialCode (CMP LTT) y x
      CharGeOp -> trivialCode (CMP LE) y x
      CharEqOp -> trivialCode (CMP EQQ) x y
      CharNeOp -> int_NE_code x y
      CharLtOp -> trivialCode (CMP LTT) x y
      CharLeOp -> trivialCode (CMP LE) x y

      IntGtOp  -> trivialCode (CMP LTT) y x
      IntGeOp  -> trivialCode (CMP LE) y x
      IntEqOp  -> trivialCode (CMP EQQ) x y
      IntNeOp  -> int_NE_code x y
      IntLtOp  -> trivialCode (CMP LTT) x y
      IntLeOp  -> trivialCode (CMP LE) x y

      WordGtOp -> trivialCode (CMP ULT) y x
      WordGeOp -> trivialCode (CMP ULE) x y
      WordEqOp -> trivialCode (CMP EQQ)  x y
      WordNeOp -> int_NE_code x y
      WordLtOp -> trivialCode (CMP ULT) x y
      WordLeOp -> trivialCode (CMP ULE) x y

      AddrGtOp -> trivialCode (CMP ULT) y x
      AddrGeOp -> trivialCode (CMP ULE) y x
      AddrEqOp -> trivialCode (CMP EQQ)  x y
      AddrNeOp -> int_NE_code x y
      AddrLtOp -> trivialCode (CMP ULT) x y
      AddrLeOp -> trivialCode (CMP ULE) x y
	
      FloatGtOp -> cmpF_code (FCMP TF LE) EQQ x y
      FloatGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
      FloatEqOp -> cmpF_code (FCMP TF EQQ) NE x y
      FloatNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
      FloatLtOp -> cmpF_code (FCMP TF LTT) NE x y
      FloatLeOp -> cmpF_code (FCMP TF LE) NE x y

      DoubleGtOp -> cmpF_code (FCMP TF LE) EQQ x y
      DoubleGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
      DoubleEqOp -> cmpF_code (FCMP TF EQQ) NE x y
      DoubleNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
      DoubleLtOp -> cmpF_code (FCMP TF LTT) NE x y
      DoubleLeOp -> cmpF_code (FCMP TF LE) NE x y

      IntAddOp  -> trivialCode (ADD Q False) x y
      IntSubOp  -> trivialCode (SUB Q False) x y
      IntMulOp  -> trivialCode (MUL Q False) x y
      IntQuotOp -> trivialCode (DIV Q False) x y
      IntRemOp  -> trivialCode (REM Q False) x y

      WordAddOp  -> trivialCode (ADD Q False) x y
      WordSubOp  -> trivialCode (SUB Q False) x y
      WordMulOp  -> trivialCode (MUL Q False) x y
      WordQuotOp -> trivialCode (DIV Q True) x y
      WordRemOp  -> trivialCode (REM Q True) x y

      FloatAddOp -> trivialFCode  FloatRep (FADD TF) x y
      FloatSubOp -> trivialFCode  FloatRep (FSUB TF) x y
      FloatMulOp -> trivialFCode  FloatRep (FMUL TF) x y
      FloatDivOp -> trivialFCode  FloatRep (FDIV TF) x y

      DoubleAddOp -> trivialFCode  F64 (FADD TF) x y
      DoubleSubOp -> trivialFCode  F64 (FSUB TF) x y
      DoubleMulOp -> trivialFCode  F64 (FMUL TF) x y
      DoubleDivOp -> trivialFCode  F64 (FDIV TF) x y

      AddrAddOp  -> trivialCode (ADD Q False) x y
      AddrSubOp  -> trivialCode (SUB Q False) x y
      AddrRemOp  -> trivialCode (REM Q True) x y

      AndOp  -> trivialCode AND x y
      OrOp   -> trivialCode OR  x y
      XorOp  -> trivialCode XOR x y
      SllOp  -> trivialCode SLL x y
      SrlOp  -> trivialCode SRL x y

      ISllOp -> trivialCode SLL x y -- was: panic "AlphaGen:isll"
      ISraOp -> trivialCode SRA x y -- was: panic "AlphaGen:isra"
      ISrlOp -> trivialCode SRL x y -- was: panic "AlphaGen:isrl"

      FloatPowerOp  -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
      DoublePowerOp -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
  where
    {- ------------------------------------------------------------
	Some bizarre special code for getting condition codes into
	registers.  Integer non-equality is a test for equality
	followed by an XOR with 1.  (Integer comparisons always set
	the result register to 0 or 1.)  Floating point comparisons of
	any kind leave the result in a floating point register, so we
	need to wrangle an integer register out of things.
    -}
    int_NE_code :: StixTree -> StixTree -> NatM Register

    int_NE_code x y
      = trivialCode (CMP EQQ) x y	`thenNat` \ register ->
	getNewRegNat IntRep		`thenNat` \ tmp ->
	let
	    code = registerCode register tmp
	    src  = registerName register tmp
	    code__2 dst = code . mkSeqInstr (XOR src (RIImm (ImmInt 1)) dst)
	in
	return (Any IntRep code__2)

    {- ------------------------------------------------------------
	Comments for int_NE_code also apply to cmpF_code
    -}
    cmpF_code
	:: (Reg -> Reg -> Reg -> Instr)
	-> Cond
	-> StixTree -> StixTree
	-> NatM Register

    cmpF_code instr cond x y
      = trivialFCode pr instr x y	`thenNat` \ register ->
	getNewRegNat F64		`thenNat` \ tmp ->
	getBlockIdNat			`thenNat` \ lbl ->
	let
	    code = registerCode register tmp
	    result  = registerName register tmp

	    code__2 dst = code . mkSeqInstrs [
		OR zeroh (RIImm (ImmInt 1)) dst,
		BF cond  result (ImmCLbl lbl),
		OR zeroh (RIReg zeroh) dst,
		NEWBLOCK lbl]
	in
	return (Any IntRep code__2)
      where
	pr = panic "trivialU?FCode: does not use PrimRep on Alpha"
      ------------------------------------------------------------

getRegister (CmmLoad pk mem)
  = getAmode mem    	    	    `thenNat` \ amode ->
    let
    	code = amodeCode amode
    	src   = amodeAddr amode
    	size = primRepToSize pk
    	code__2 dst = code . mkSeqInstr (LD size dst src)
    in
    return (Any pk code__2)

getRegister (StInt i)
  | fits8Bits i
  = let
    	code dst = mkSeqInstr (OR zeroh (RIImm src) dst)
    in
    return (Any IntRep code)
  | otherwise
  = let
    	code dst = mkSeqInstr (LDI Q dst src)
    in
    return (Any IntRep code)
  where
    src = ImmInt (fromInteger i)

getRegister leaf
  | isJust imm
  = let
    	code dst = mkSeqInstr (LDA dst (AddrImm imm__2))
    in
    return (Any PtrRep code)
  where
    imm = maybeImm leaf
    imm__2 = case imm of Just x -> x

#endif /* alpha_TARGET_ARCH */

-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

#if i386_TARGET_ARCH

-- F32 literal (i386): emit the constant into a read-only data section
-- and load it through the x87 GLD pseudo-instruction.
getRegister (CmmLit (CmmFloat f F32)) = do
    lbl <- getNewLabelNat
    dflags <- getDynFlagsNat
    dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
    Amode addr addr_code <- getAmode dynRef
    let code dst =
            LDATA ReadOnlyData
                        [CmmDataLabel lbl,
                         CmmStaticLit (CmmFloat f F32)]
            `consOL` (addr_code `snocOL`
            GLD F32 addr dst)
    -- in
    return (Any F32 code)


-- F64 literal (i386): 0.0 and 1.0 have dedicated x87 load
-- pseudo-instructions; anything else goes via a data section.
getRegister (CmmLit (CmmFloat d F64))
  | d == 0.0
  = let code dst = unitOL (GLDZ dst)
    in  return (Any F64 code)

  | d == 1.0
  = let code dst = unitOL (GLD1 dst)
    in  return (Any F64 code)

  | otherwise = do
    lbl <- getNewLabelNat
    dflags <- getDynFlagsNat
    dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
    Amode addr addr_code <- getAmode dynRef
    let code dst =
            LDATA ReadOnlyData
                        [CmmDataLabel lbl,
                         CmmStaticLit (CmmFloat d F64)]
            `consOL` (addr_code `snocOL`
            GLD F64 addr dst)
    -- in
    return (Any F64 code)

832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
#endif /* i386_TARGET_ARCH */

#if x86_64_TARGET_ARCH

-- Float 0.0 (x86_64): xor the destination with itself.
getRegister (CmmLit (CmmFloat 0.0 rep)) = do
   let code dst = unitOL  (XOR rep (OpReg dst) (OpReg dst))
        -- I don't know why there are xorpd, xorps, and pxor instructions.
        -- They all appear to do the same thing --SDM
   return (Any rep code)

-- Other float literals (x86_64): place the constant in a read-only
-- data section and load it rip-relative.
getRegister (CmmLit (CmmFloat f rep)) = do
    lbl <- getNewLabelNat
    let code dst = toOL [
            LDATA ReadOnlyData
                        [CmmDataLabel lbl,
                         CmmStaticLit (CmmFloat f rep)],
            MOV rep (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
            ]
    -- in
    return (Any rep code)

#endif /* x86_64_TARGET_ARCH */

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873

-- catch simple cases of zero- or sign-extended load
-- Zero-extending 8-bit load.
getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad addr _]) = do
  load_code <- intLoadCode (MOVZxL I8) addr
  return (Any I32 load_code)

-- Sign-extending 8-bit load.
getRegister (CmmMachOp (MO_S_Conv I8 I32) [CmmLoad addr _]) = do
  load_code <- intLoadCode (MOVSxL I8) addr
  return (Any I32 load_code)

-- Zero-extending 16-bit load.
getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad addr _]) = do
  load_code <- intLoadCode (MOVZxL I16) addr
  return (Any I32 load_code)

-- Sign-extending 16-bit load.
getRegister (CmmMachOp (MO_S_Conv I16 I32) [CmmLoad addr _]) = do
  load_code <- intLoadCode (MOVSxL I16) addr
  return (Any I32 load_code)

874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
#endif

#if x86_64_TARGET_ARCH

-- catch simple cases of zero- or sign-extended load
getRegister (CmmMachOp (MO_U_Conv I8 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL I8) addr
  return (Any I64 code)

getRegister (CmmMachOp (MO_S_Conv I8 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL I8) addr
  return (Any I64 code)

getRegister (CmmMachOp (MO_U_Conv I16 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL I16) addr
  return (Any I64 code)

getRegister (CmmMachOp (MO_S_Conv I16 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL I16) addr
  return (Any I64 code)

getRegister (CmmMachOp (MO_U_Conv I32 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOV I32) addr -- 32-bit loads zero-extend
  return (Any I64 code)

getRegister (CmmMachOp (MO_S_Conv I32 I64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL I32) addr
  return (Any I64 code)

#endif

905
906
907
908
909
910
911
#if x86_64_TARGET_ARCH
getRegister (CmmMachOp (MO_Add I64) [CmmReg (CmmGlobal PicBaseReg),
                                     CmmLit displacement])
    = return $ Any I64 (\dst -> unitOL $
        LEA I64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst))
#endif

912
913
#if x86_64_TARGET_ARCH
getRegister (CmmMachOp (MO_S_Neg F32) [x]) = do
914
  x_code <- getAnyReg x
915
916
  lbl <- getNewLabelNat
  let
917
    code dst = x_code dst `appOL` toOL [
918
919
920
921
922
923
924
925
926
	-- This is how gcc does it, so it can't be that bad:
	LDATA ReadOnlyData16 [
		CmmAlign 16,
		CmmDataLabel lbl,
		CmmStaticLit (CmmInt 0x80000000 I32),
		CmmStaticLit (CmmInt 0 I32),
		CmmStaticLit (CmmInt 0 I32),
		CmmStaticLit (CmmInt 0 I32)
	],
927
	XOR F32 (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
928
929
930
931
932
933
934
		-- xorps, so we need the 128-bit constant
		-- ToDo: rip-relative
	]
  --
  return (Any F32 code)

getRegister (CmmMachOp (MO_S_Neg F64) [x]) = do
935
  x_code <- getAnyReg x
936
937
938
  lbl <- getNewLabelNat
  let
	-- This is how gcc does it, so it can't be that bad:
939
    code dst = x_code dst `appOL` toOL [
940
941
942
943
944
945
946
	LDATA ReadOnlyData16 [
		CmmAlign 16,
		CmmDataLabel lbl,
		CmmStaticLit (CmmInt 0x8000000000000000 I64),
		CmmStaticLit (CmmInt 0 I64)
	],
		-- gcc puts an unpck here.  Wonder if we need it.
947
	XOR F64 (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
948
949
950
951
952
953
954
		-- xorpd, so we need the 128-bit constant
	]
  --
  return (Any F64 code)
#endif

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
955
956
957

getRegister (CmmMachOp mop [x]) -- unary MachOps
  = case mop of
958
#if i386_TARGET_ARCH
959
960
      MO_S_Neg F32 -> trivialUFCode F32 (GNEG F32) x
      MO_S_Neg F64 -> trivialUFCode F64 (GNEG F64) x
961
#endif
962
963
964
965
966

      MO_S_Neg rep -> trivialUCode rep (NEGI rep) x
      MO_Not rep   -> trivialUCode rep (NOT  rep) x

      -- Nop conversions
967
968
969
970
971
972
      MO_U_Conv I32 I8  -> toI8Reg  I32 x
      MO_S_Conv I32 I8  -> toI8Reg  I32 x
      MO_U_Conv I16 I8  -> toI8Reg  I16 x
      MO_S_Conv I16 I8  -> toI8Reg  I16 x
      MO_U_Conv I32 I16 -> toI16Reg I32 x
      MO_S_Conv I32 I16 -> toI16Reg I32 x
973
974
975
#if x86_64_TARGET_ARCH
      MO_U_Conv I64 I32 -> conversionNop I64 x
      MO_S_Conv I64 I32 -> conversionNop I64 x
976
977
978
979
      MO_U_Conv I64 I16 -> toI16Reg I64 x
      MO_S_Conv I64 I16 -> toI16Reg I64 x
      MO_U_Conv I64 I8  -> toI8Reg  I64 x
      MO_S_Conv I64 I8  -> toI8Reg  I64 x
980
981
#endif

982
983
984
985
986
987
988
989
990
991
992
993
      MO_U_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x
      MO_S_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x

      -- widenings
      MO_U_Conv I8  I32 -> integerExtend I8  I32 MOVZxL x
      MO_U_Conv I16 I32 -> integerExtend I16 I32 MOVZxL x
      MO_U_Conv I8  I16 -> integerExtend I8  I16 MOVZxL x

      MO_S_Conv I8  I32 -> integerExtend I8  I32 MOVSxL x
      MO_S_Conv I16 I32 -> integerExtend I16 I32 MOVSxL x
      MO_S_Conv I8  I16 -> integerExtend I8  I16 MOVSxL x

994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
#if x86_64_TARGET_ARCH
      MO_U_Conv I8  I64 -> integerExtend I8  I64 MOVZxL x
      MO_U_Conv I16 I64 -> integerExtend I16 I64 MOVZxL x
      MO_U_Conv I32 I64 -> integerExtend I32 I64 MOVZxL x
      MO_S_Conv I8  I64 -> integerExtend I8  I64 MOVSxL x
      MO_S_Conv I16 I64 -> integerExtend I16 I64 MOVSxL x
      MO_S_Conv I32 I64 -> integerExtend I32 I64 MOVSxL x
	-- for 32-to-64 bit zero extension, amd64 uses an ordinary movl.
	-- However, we don't want the register allocator to throw it
	-- away as an unnecessary reg-to-reg move, so we keep it in
	-- the form of a movzl and print it as a movl later.
#endif

#if i386_TARGET_ARCH
1008
1009
      MO_S_Conv F32 F64 -> conversionNop F64 x
      MO_S_Conv F64 F32 -> conversionNop F32 x
1010
1011
1012
1013
1014
#else
      MO_S_Conv F32 F64 -> coerceFP2FP F64 x
      MO_S_Conv F64 F32 -> coerceFP2FP F32 x
#endif

1015
1016
1017
1018
      MO_S_Conv from to
	| isFloatingRep from -> coerceFP2Int from to x
	| isFloatingRep to   -> coerceInt2FP from to x

1019
      other -> pprPanic "getRegister" (pprMachOp mop)
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
   where
	-- signed or unsigned extension.
	integerExtend from to instr expr = do
	    (reg,e_code) <- if from == I8 then getByteReg expr
					  else getSomeReg expr
	    let 
		code dst = 
		  e_code `snocOL`
		  instr from (OpReg reg) (OpReg dst)
	    return (Any to code)

1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
	toI8Reg new_rep expr
            = do codefn <- getAnyReg expr
		 return (Any new_rep codefn)
		-- HACK: use getAnyReg to get a byte-addressable register.
		-- If the source was a Fixed register, this will add the
		-- mov instruction to put it into the desired destination.
		-- We're assuming that the destination won't be a fixed
		-- non-byte-addressable register; it won't be, because all
		-- fixed registers are word-sized.

	toI16Reg = toI8Reg -- for now

1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
        conversionNop new_rep expr
            = do e_code <- getRegister expr
                 return (swizzleRegisterRep e_code new_rep)


getRegister e@(CmmMachOp mop [x, y]) -- dyadic MachOps
  = ASSERT2(cmmExprRep x /= I8, pprExpr e)
    case mop of
      MO_Eq F32   -> condFltReg EQQ x y
      MO_Ne F32   -> condFltReg NE x y
      MO_S_Gt F32 -> condFltReg GTT x y
      MO_S_Ge F32 -> condFltReg GE x y
      MO_S_Lt F32 -> condFltReg LTT x y
      MO_S_Le F32 -> condFltReg LE x y

      MO_Eq F64   -> condFltReg EQQ x y
      MO_Ne F64   -> condFltReg NE x y
      MO_S_Gt F64 -> condFltReg GTT x y
      MO_S_Ge F64 -> condFltReg GE x y
      MO_S_Lt F64 -> condFltReg LTT x y
      MO_S_Le F64 -> condFltReg LE x y

      MO_Eq rep   -> condIntReg EQQ x y
      MO_Ne rep   -> condIntReg NE x y

      MO_S_Gt rep -> condIntReg GTT x y
      MO_S_Ge rep -> condIntReg GE x y
      MO_S_Lt rep -> condIntReg LTT x y
      MO_S_Le rep -> condIntReg LE x y

      MO_U_Gt rep -> condIntReg GU  x y
      MO_U_Ge rep -> condIntReg GEU x y
      MO_U_Lt rep -> condIntReg LU  x y
      MO_U_Le rep -> condIntReg LEU x y

1078
1079
1080
#if i386_TARGET_ARCH
      MO_Add F32 -> trivialFCode F32 GADD x y
      MO_Sub F32 -> trivialFCode F32 GSUB x y
1081
1082
1083
1084

      MO_Add F64 -> trivialFCode F64 GADD x y
      MO_Sub F64 -> trivialFCode F64 GSUB x y

1085
      MO_S_Quot F32 -> trivialFCode F32 GDIV x y
1086
      MO_S_Quot F64 -> trivialFCode F64 GDIV x y
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
#endif

#if x86_64_TARGET_ARCH
      MO_Add F32 -> trivialFCode F32 ADD x y
      MO_Sub F32 -> trivialFCode F32 SUB x y

      MO_Add F64 -> trivialFCode F64 ADD x y
      MO_Sub F64 -> trivialFCode F64 SUB x y

      MO_S_Quot F32 -> trivialFCode F32 FDIV x y
      MO_S_Quot F64 -> trivialFCode F64 FDIV x y
#endif
1099
1100
1101
1102
1103
1104
1105
1106
1107

      MO_Add rep -> add_code rep x y
      MO_Sub rep -> sub_code rep x y

      MO_S_Quot rep -> div_code rep True  True  x y
      MO_S_Rem  rep -> div_code rep True  False x y
      MO_U_Quot rep -> div_code rep False True  x y
      MO_U_Rem  rep -> div_code rep False False x y

1108
#if i386_TARGET_ARCH
1109
1110
      MO_Mul   F32 -> trivialFCode F32 GMUL x y
      MO_Mul   F64 -> trivialFCode F64 GMUL x y
1111
1112
1113
1114
1115
1116
1117
#endif

#if x86_64_TARGET_ARCH
      MO_Mul   F32 -> trivialFCode F32 MUL x y
      MO_Mul   F64 -> trivialFCode F64 MUL x y
#endif

1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
      MO_Mul   rep -> let op = IMUL rep in 
		      trivialCode rep op (Just op) x y

      MO_S_MulMayOflo rep -> imulMayOflo rep x y

      MO_And rep -> let op = AND rep in 
		    trivialCode rep op (Just op) x y
      MO_Or  rep -> let op = OR  rep in
		    trivialCode rep op (Just op) x y
      MO_Xor rep -> let op = XOR rep in
		    trivialCode rep op (Just op) x y

	{- Shift ops on x86s have constraints on their source, it
	   either has to be Imm, CL or 1
	    => trivialCode is not restrictive enough (sigh.)
	-}	   
      MO_Shl rep   -> shift_code rep (SHL rep) x y {-False-}
      MO_U_Shr rep -> shift_code rep (SHR rep) x y {-False-}
      MO_S_Shr rep -> shift_code rep (SAR rep) x y {-False-}

      other -> pprPanic "getRegister(x86) - binary CmmMachOp (1)" (pprMachOp mop)
  where
    --------------------
    imulMayOflo :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1142
    imulMayOflo rep a b = do
1143
         (a_reg, a_code) <- getNonClobberedReg a
1144
         b_code <- getAnyReg b
1145
         let 
1146
1147
1148
1149
1150
1151
	     shift_amt  = case rep of
			   I32 -> 31
			   I64 -> 63
			   _ -> panic "shift_amt"

             code = a_code `appOL` b_code eax `appOL`
1152
                        toOL [
1153
1154
1155
1156
1157
1158
			   IMUL2 rep (OpReg a_reg),   -- result in %edx:%eax
                           SAR rep (OpImm (ImmInt shift_amt)) (OpReg eax),
				-- sign extend lower part
                           SUB rep (OpReg edx) (OpReg eax)
				-- compare against upper
                           -- eax==0 if high part == sign extended low part
1159
1160
                        ]
         -- in
1161
	 return (Fixed rep eax code)
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179

    --------------------
    shift_code :: MachRep
	       -> (Operand -> Operand -> Instr)
	       -> CmmExpr
	       -> CmmExpr
	       -> NatM Register

    {- Case1: shift length as immediate -}
    shift_code rep instr x y@(CmmLit lit) = do
	  x_code <- getAnyReg x
	  let
	       code dst
		  = x_code dst `snocOL` 
		    instr (OpImm (litToImm lit)) (OpReg dst)
	  -- in
	  return (Any rep code)
        
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
    {- Case2: shift length is complex (non-immediate)
      * y must go in %ecx.
      * we cannot do y first *and* put its result in %ecx, because
        %ecx might be clobbered by x.
      * if we do y second, then x cannot be 
        in a clobbered reg.  Also, we cannot clobber x's reg
        with the instruction itself.
      * so we can either:
        - do y first, put its result in a fresh tmp, then copy it to %ecx later
        - do y second and put its result into %ecx.  x gets placed in a fresh
          tmp.  This is likely to be better, becuase the reg alloc can
          eliminate this reg->reg move here (it won't eliminate the other one,
          because the move is into the fixed %ecx).
    -}
1194
    shift_code rep instr x y{-amount-} = do
1195
1196
        x_code <- getAnyReg x
	tmp <- getNewRegNat rep
1197
1198
        y_code <- getAnyReg y
	let 
1199
	   code = x_code tmp `appOL`
1200
		  y_code ecx `snocOL`
1201
		  instr (OpReg ecx) (OpReg tmp)
1202
        -- in
1203
        return (Fixed rep tmp code)
1204
1205
1206

    --------------------
    add_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1207
1208
    add_code rep x (CmmLit (CmmInt y _))
	| not (is64BitInteger y) = add_int rep x y
1209
1210
1211
1212
    add_code rep x y = trivialCode rep (ADD rep) (Just (ADD rep)) x y

    --------------------
    sub_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1213
1214
    sub_code rep x (CmmLit (CmmInt y _))
	| not (is64BitInteger (-y)) = add_int rep x (-y)
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
    sub_code rep x y = trivialCode rep (SUB rep) Nothing x y

    -- our three-operand add instruction:
    add_int rep x y = do
	(x_reg, x_code) <- getSomeReg x
	let
	    imm = ImmInt (fromInteger y)
	    code dst
               = x_code `snocOL`
		 LEA rep
1225
			(OpAddr (AddrBaseIndex (EABaseReg x_reg) EAIndexNone imm))
1226
1227
1228
1229
1230
1231
                        (OpReg dst)
	-- 
	return (Any rep code)

    ----------------------
    div_code rep signed quotient x y = do
1232
	   (y_op, y_code) <- getRegOrMem y -- cannot be clobbered
1233
1234
	   x_code <- getAnyReg x
	   let
1235
	     widen | signed    = CLTD rep
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
		   | otherwise = XOR rep (OpReg edx) (OpReg edx)

	     instr | signed    = IDIV
		   | otherwise = DIV

	     code = y_code `appOL`
		    x_code eax `appOL`
		    toOL [widen, instr rep y_op]

	     result | quotient  = eax
		    | otherwise = edx

	   -- in
           return (Fixed rep result code)


getRegister (CmmLoad mem pk)
  | isFloatingRep pk
  = do
    Amode src mem_code <- getAmode mem
    let
    	code dst = mem_code `snocOL` 
1258
1259
		   IF_ARCH_i386(GLD pk src dst,
			        MOV pk (OpAddr src) (OpReg dst))
1260
1261
1262
    --
    return (Any pk code)

1263
#if i386_TARGET_ARCH
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
getRegister (CmmLoad mem pk)
  | pk /= I64
  = do 
    code <- intLoadCode (instr pk) mem
    return (Any pk code)
  where
	instr I8  = MOVZxL pk
	instr I16 = MOV I16
	instr I32 = MOV I32
	-- we always zero-extend 8-bit loads, if we
	-- can't think of anything better.  This is because
	-- we can't guarantee access to an 8-bit variant of every register
	-- (esi and edi don't have 8-bit variants), so to make things
	-- simpler we do our 8-bit arithmetic with full 32-bit registers.
1278
1279
1280
1281
1282
1283
1284
1285
1286
#endif

#if x86_64_TARGET_ARCH
-- Simpler memory load code on x86_64
getRegister (CmmLoad mem pk)
  = do 
    code <- intLoadCode (MOV pk) mem
    return (Any pk code)
#endif
1287
1288
1289

getRegister (CmmLit (CmmInt 0 rep))
  = let
1290
1291
1292
	-- x86_64: 32-bit xor is one byte shorter, and zero-extends to 64 bits
	adj_rep = case rep of I64 -> I32; _ -> rep
	rep1 = IF_ARCH_i386( rep, adj_rep ) 
1293
    	code dst 
1294
           = unitOL (XOR rep1 (OpReg dst) (OpReg dst))
1295
1296
1297
    in
    	return (Any rep code)

1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
#if x86_64_TARGET_ARCH
  -- optimisation for loading small literals on x86_64: take advantage
  -- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
  -- instruction forms are shorter.
getRegister (CmmLit lit) 
  | I64 <- cmmLitRep lit, not (isBigLit lit)
  = let 
	imm = litToImm lit
	code dst = unitOL (MOV I32 (OpImm imm) (OpReg dst))
    in
    	return (Any I64 code)
  where
   isBigLit (CmmInt i I64) = i < 0 || i > 0xffffffff
   isBigLit _ = False
	-- note1: not the same as is64BitLit, because that checks for
	-- signed literals that fit in 32 bits, but we want unsigned
	-- literals here.
	-- note2: all labels are small, because we're assuming the
	-- small memory model (see gcc docs, -mcmodel=small).
#endif

1319
1320
1321
1322
1323
1324
1325
1326
getRegister (CmmLit lit)
  = let 
	rep = cmmLitRep lit
	imm = litToImm lit
	code dst = unitOL (MOV rep (OpImm imm) (OpReg dst))
    in
    	return (Any rep code)

1327
getRegister other = pprPanic "getRegister(x86)" (ppr other)
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350


-- Assemble the code for an integer load: compute the addressing mode for
-- @mem@, then use @instr@ to move from that address into the destination.
intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
   -> NatM (Reg -> InstrBlock)
intLoadCode instr mem = do
  Amode src mem_code <- getAmode mem
  let load dst = mem_code `snocOL` instr (OpAddr src) (OpReg dst)
  return load

-- Compute an expression into *any* register, inserting the extra
-- register-to-register move if the underlying Register is Fixed.
getAnyReg :: CmmExpr -> NatM (Reg -> InstrBlock)
getAnyReg expr = getRegister expr >>= anyReg

-- Turn a Register into a code generator parameterised over the destination
-- register.  An Any register already is one; a Fixed register gets a final
-- reg2reg move appended to put its value where the caller wants it.
anyReg :: Register -> NatM (Reg -> InstrBlock)
anyReg register =
  case register of
    Any _ code -> return code
    Fixed rep reg fcode ->
        return (\dst -> fcode `snocOL` reg2reg rep reg dst)

-- A bit like getSomeReg, but we want a reg that can be byte-addressed.
-- Fixed registers might not be byte-addressable, so we make sure we've
-- got a temporary, inserting an extra reg copy if necessary.
--
-- (Fix: stray line-number debris from a web diff view had leaked into this
-- definition and broke it syntactically; the logic itself is unchanged.)
getByteReg :: CmmExpr -> NatM (Reg, InstrBlock)
#if x86_64_TARGET_ARCH
getByteReg = getSomeReg -- all regs are byte-addressable on x86_64
#else
getByteReg expr = do
  r <- getRegister expr
  case r of
    Any rep code -> do
        -- a fresh virtual register is always byte-addressable
        tmp <- getNewRegNat rep
        return (tmp, code tmp)
    Fixed rep reg code
        | isVirtualReg reg -> return (reg,code)
        | otherwise -> do
            -- a real register may lack an 8-bit variant (esi/edi on i386),
            -- so copy it into a fresh virtual register first
            tmp <- getNewRegNat rep
            return (tmp, code `snocOL` reg2reg rep reg tmp)
        -- ToDo: could optimise slightly by checking for byte-addressable
        -- real registers, but that will happen very rarely if at all.
#endif
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387

-- Another variant: this time we want the result in a register that cannot
-- be modified by code to evaluate an arbitrary expression.
getNonClobberedReg :: CmmExpr -> NatM (Reg, InstrBlock)
getNonClobberedReg expr = do
  r <- getRegister expr
  case r of
    Any rep code -> do
        tmp <- getNewRegNat rep
        return (tmp, code tmp)
    Fixed rep reg code ->
        case reg of
          -- only free real registers can be clobbered by later code, so
          -- copy those into a fresh virtual register
          RealReg rr | isFastTrue (freeReg rr) -> do
              tmp <- getNewRegNat rep
              return (tmp, code `snocOL` reg2reg rep reg tmp)
          _ -> return (reg, code)

-- Generate a register-to-register move of the given width.  On i386,
-- floating-point values need the GMOV instruction; everything else
-- (including all registers on x86_64) uses an ordinary MOV.
--
-- (Fix: stray line-number debris from a web diff view had leaked into this
-- definition and broke it syntactically; the logic itself is unchanged.)
reg2reg :: MachRep -> Reg -> Reg -> Instr
reg2reg rep src dst
#if i386_TARGET_ARCH
  | isFloatingRep rep = GMOV src dst
#endif
  | otherwise         = MOV rep (OpReg src) (OpReg dst)

1393
#endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
1394
1395
1396
1397
1398

-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

#if sparc_TARGET_ARCH

1399
1400
getRegister (CmmLit (CmmFloat f F32)) = do
    lbl <- getNewLabelNat
1401
    let code dst = toOL [
1402
1403
1404
1405
1406
1407
	    LDATA ReadOnlyData
	                [CmmDataLabel lbl,
			 CmmStaticLit (CmmFloat f F32)],
	    SETHI (HI (ImmCLbl lbl)) dst,
	    LD F32 (AddrRegImm dst (LO (ImmCLbl lbl))) dst] 
    return (Any F32 code)
1408

1409
1410
getRegister (CmmLit (CmmFloat d F64)) = do
    lbl <- getNewLabelNat
1411
    let code dst = toOL [
1412
1413
1414
1415
1416
1417
	    LDATA ReadOnlyData
	                [CmmDataLabel lbl,
			 CmmStaticLit (CmmFloat d F64)],
	    SETHI (HI (ImmCLbl lbl)) dst,
	    LD F64 (AddrRegImm dst (LO (ImmCLbl lbl))) dst] 
    return (Any F64 code)
1418

1419
getRegister (CmmMachOp mop [x]) -- unary MachOps
1420
  = case mop of
1421
1422
      MO_S_Neg F32     -> trivialUFCode F32 (FNEG F32) x
      MO_S_Neg F64     -> trivialUFCode F64 (FNEG F64) x
1423

1424
1425
      MO_S_Neg rep     -> trivialUCode rep (SUB False False g0) x
      MO_Not rep       -> trivialUCode rep (XNOR False g0) x
1426

1427
      MO_U_Conv I32 I8 -> trivialCode I8 (AND False) x (CmmLit (CmmInt 255 I8))
1428

1429
1430
      MO_U_Conv F64 F32-> coerceDbl2Flt x
      MO_U_Conv F32 F64-> coerceFlt2Dbl x
1431

1432
1433
1434
1435
      MO_S_Conv F32 I32-> coerceFP2Int F32 I32 x
      MO_S_Conv I32 F32-> coerceInt2FP I32 F32 x
      MO_S_Conv F64 I32-> coerceFP2Int F64 I32 x
      MO_S_Conv I32 F64-> coerceInt2FP I32 F64 x
1436

1437
1438
1439
1440
1441
      -- Conversions which are a nop on sparc
      MO_U_Conv from to
	| from == to   -> conversionNop to   x
      MO_U_Conv I32 to -> conversionNop to   x
      MO_S_Conv I32 to -> conversionNop to   x
1442

1443
1444
1445
1446
1447
      -- widenings
      MO_U_Conv I8 I32  -> integerExtend False I8 I32  x
      MO_U_Conv I16 I32 -> integerExtend False I16 I32 x
      MO_U_Conv I8 I16  -> integerExtend False I8 I16  x
      MO_S_Conv I16 I32 -> integerExtend True I16 I32  x
1448

1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
      other_op -> panic "Unknown unary mach op"
    where
        -- XXX SLL/SRL?
        integerExtend signed from to expr = do
           (reg, e_code) <- getSomeReg expr
	   let
	       code dst =
		   e_code `snocOL` 
		   ((if signed then SRA else SRL)
		          reg (RIImm (ImmInt 0)) dst)
	   return (Any to code)
        conversionNop new_rep expr
            = do e_code <- getRegister expr
                 return (swizzleRegisterRep e_code new_rep)
1463

1464
1465
1466
1467
getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
  = case mop of
      MO_Eq F32 -> condFltReg EQQ x y
      MO_Ne F32 -> condFltReg NE x y
1468

1469
1470
1471
1472
      MO_S_Gt F32 -> condFltReg GTT x y
      MO_S_Ge F32 -> condFltReg GE x y 
      MO_S_Lt F32 -> condFltReg LTT x y
      MO_S_Le F32 -> condFltReg LE x y
1473

1474
1475
      MO_Eq F64 -> condFltReg EQQ x y
      MO_Ne F64 -> condFltReg NE x y
1476

1477
1478
1479
1480
      MO_S_Gt F64 -> condFltReg GTT x y
      MO_S_Ge F64 -> condFltReg GE x y
      MO_S_Lt F64 -> condFltReg LTT x y
      MO_S_Le F64 -> condFltReg LE x y
1481

1482
1483
      MO_Eq rep -> condIntReg EQQ x y
      MO_Ne rep -> condIntReg NE x y
1484

1485
1486
1487
1488
1489
1490
1491
1492
1493
      MO_S_Gt rep -> condIntReg GTT x y
      MO_S_Ge rep -> condIntReg GE x y
      MO_S_Lt rep -> condIntReg LTT x y
      MO_S_Le rep -> condIntReg LE x y
	      
      MO_U_Gt I32  -> condIntReg GTT x y
      MO_U_Ge I32  -> condIntReg GE x y
      MO_U_Lt I32  -> condIntReg LTT x y
      MO_U_Le I32  -> condIntReg LE x y
1494

1495
1496
1497
1498
      MO_U_Gt I16 -> condIntReg GU  x y
      MO_U_Ge I16 -> condIntReg GEU x y
      MO_U_Lt I16 -> condIntReg LU  x y
      MO_U_Le I16 -> condIntReg LEU x y
1499

1500
1501
      MO_Add I32 -> trivialCode I32 (ADD False False) x y
      MO_Sub I32 -> trivialCode I32 (SUB False False) x y
1502

1503
1504
      MO_S_MulMayOflo rep -> imulMayOflo rep x y
{-
1505
      -- ToDo: teach about V8+ SPARC div instructions
1506
1507
1508
1509
1510
1511
1512
1513
1514
      MO_S_Quot I32 -> idiv FSLIT(".div")  x y
      MO_S_Rem I32  -> idiv FSLIT(".rem")  x y
      MO_U_Quot I32 -> idiv FSLIT(".udiv")  x y
      MO_U_Rem I32  -> idiv FSLIT(".urem")  x y
-}
      MO_Add F32  -> trivialFCode F32 FADD  x y
      MO_Sub F32   -> trivialFCode F32  FSUB x y
      MO_Mul F32   -> trivialFCode F32  FMUL  x y
      MO_S_Quot F32   -> trivialFCode F32  FDIV x y
1515

1516
1517
1518
1519
      MO_Add F64   -> trivialFCode F64 FADD  x y
      MO_Sub F64   -> trivialFCode F64  FSUB x y
      MO_Mul F64   -> trivialFCode F64  FMUL x y
      MO_S_Quot F64   -> trivialFCode F64  FDIV x y
1520

1521
1522
1523
      MO_And rep   -> trivialCode rep (AND False) x y
      MO_Or rep    -> trivialCode rep (OR  False) x y
      MO_Xor rep   -> trivialCode rep (XOR False) x y
1524

1525
      MO_Mul rep -> trivialCode rep (SMUL False) x y
1526

1527
1528
1529
      MO_Shl rep   -> trivialCode rep SLL  x y
      MO_U_Shr rep   -> trivialCode rep SRL x y
      MO_S_Shr rep   -> trivialCode rep SRA x y
1530

1531
{-
1532
1533
1534
1535
1536
      MO_F32_Pwr  -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64 
                                         [promote x, promote y])
		       where promote x = CmmMachOp MO_F32_to_Dbl [x]
      MO_F64_Pwr -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64 
                                        [x, y])
1537
-}
1538
1539
      other -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
  where
1540
    --idiv fn x y = getRegister (StCall (Left fn) CCallConv I32 [x, y])
1541
1542

    --------------------
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
    imulMayOflo :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
    imulMayOflo rep a b = do
         (a_reg, a_code) <- getSomeReg a
	 (b_reg, b_code) <- getSomeReg b
	 res_lo <- getNewRegNat I32
	 res_hi <- getNewRegNat I32
	 let
	    shift_amt  = case rep of
			  I32 -> 31
			  I64 -> 63
			  _ -> panic "shift_amt"
	    code dst = a_code `appOL` b_code `appOL`
                       toOL [
                           SMUL False a_reg (RIReg b_reg) res_lo,
1557
                           RDY res_hi,
1558
                           SRA res_lo (RIImm (ImmInt shift_amt)) res_lo,
1559
1560
                           SUB False False res_lo (RIReg res_hi) dst
                        ]
1561
         return (Any I32 code)
1562

1563
getRegister (CmmLoad mem pk) = do
1564
1565
    Amode src code <- getAmode mem
    let
1566
    	code__2 dst = code `snocOL` LD pk src dst
1567
1568
    return (Any pk code__2)

1569
getRegister (CmmLit (CmmInt i _))
1570
1571
1572
1573
1574
  | fits13Bits i
  = let
    	src = ImmInt (fromInteger i)
    	code dst = unitOL (OR False g0 (RIImm src) dst)
    in
1575
    	return (Any I32 code)
1576

1577
1578
1579
getRegister (CmmLit lit)
  = let rep = cmmLitRep lit
	imm = litToImm lit
1580
    	code dst = toOL [
1581
1582
1583
    	    SETHI (HI imm) dst,
    	    OR False dst (RIImm (LO imm)) dst]
    in return (Any I32 code)
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686

#endif /* sparc_TARGET_ARCH */

#if powerpc_TARGET_ARCH
getRegister (CmmLoad mem pk)
  | pk /= I64
  = do
        Amode addr addr_code <- getAmode mem
        let code dst = ASSERT((regClass dst == RcDouble) == isFloatingRep pk)
                       addr_code `snocOL` LD pk dst addr
        return (Any pk code)

-- catch simple cases of zero- or sign-extended load
getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad mem _]) = do
    Amode addr addr_code <- getAmode mem
    return (Any I32 (\dst -> addr_code `snocOL` LD I8 dst addr))

-- Note: there is no Load Byte Arithmetic instruction, so no signed case here

getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad mem _]) = do
    Amode addr addr_code <- getAmode mem
    return (Any I32 (\dst -> addr_code