Commit 8f16b3cd authored by Simon Peyton Jones

Merge branch 'master' of http://darcs.haskell.org/ghc

parents b6532314 50b33e32
......@@ -46,8 +46,6 @@ name :: IORef (ty); \
name = Util.globalM (value);
#endif
#define COMMA ,
#ifdef DEBUG
#define ASSERT(e) if (not (e)) then (assertPanic __FILE__ __LINE__) else
#define ASSERT2(e,msg) if (not (e)) then (assertPprPanic __FILE__ __LINE__ (msg)) else
......
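An aside on what these macros do at a use site: ASSERT is plain CPP, so it expands into an if/then/else spliced in front of the guarded expression. A minimal stand-alone sketch of the expansion, with a stub standing in for GHC's assertPanic and the file/line already substituted by CPP:

-- Written in a GHC source file (with -DDEBUG in force):
--   checkedHead xs = ASSERT( not (null xs) ) head xs
-- After CPP substitutes ASSERT, __FILE__ and __LINE__:
checkedHead :: [a] -> a
checkedHead xs =
    if not (not (null xs))           -- the macro tests 'not (e)'
    then assertPanic "Demo.hs" 7     -- file/line become literals
    else head xs

-- Stub with the same shape as GHC's assertPanic, so the sketch compiles:
assertPanic :: String -> Int -> a
assertPanic file line =
    error ("ASSERT failed! file " ++ file ++ ", line " ++ show line)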
......@@ -1227,16 +1227,14 @@ dealWithCoercion co stuff@(dc, _dc_univ_args, dc_args)
-- Cast the value arguments (which include dictionaries)
new_val_args = zipWith cast_arg arg_tys val_args
cast_arg arg_ty arg = mkCoerce (theta_subst arg_ty) arg
in
#ifdef DEBUG
let dump_doc = vcat [ppr dc, ppr dc_univ_tyvars, ppr dc_ex_tyvars,
dump_doc = vcat [ppr dc, ppr dc_univ_tyvars, ppr dc_ex_tyvars,
ppr arg_tys, ppr dc_args, ppr _dc_univ_args,
ppr ex_args, ppr val_args]
in
ASSERT2( eqType _from_ty (mkTyConApp to_tc _dc_univ_args), dump_doc )
ASSERT2( all isTypeArg ex_args, dump_doc )
ASSERT2( equalLength val_args arg_tys, dump_doc )
#endif
Just (dc, to_tc_arg_tys, ex_args ++ new_val_args)
| otherwise
......
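What makes the hunk above safe: in a non-DEBUG build ASSERT2 expands to nothing, so the now-unconditional dump_doc is just an unused binding that GHC discards; only DEBUG builds ever evaluate it. A cut-down, compilable sketch of the idiom (needs CPP; error stands in for the real assertPprPanic):

{-# LANGUAGE CPP #-}
#ifdef DEBUG
#define ASSERT2(e,msg) if (not (e)) then (error (msg)) else
#else
#define ASSERT2(e,msg)
#endif

safeDiv :: Int -> Int -> Int
safeDiv x y =
    let dump_doc = "safeDiv: " ++ show (x, y)  -- built unconditionally, as above
    in
    ASSERT2( y /= 0, dump_doc )
    x `div` y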
......@@ -150,7 +150,8 @@ data NcgImpl statics instr jumpDest = NcgImpl {
--------------------
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmmGroup] -> IO ()
nativeCodeGen dflags h us cmms
= let nCG' :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr) => NcgImpl statics instr jumpDest -> IO ()
= let platform = targetPlatform dflags
nCG' :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr) => NcgImpl statics instr jumpDest -> IO ()
nCG' ncgImpl = nativeCodeGen' dflags ncgImpl h us cmms
x86NcgImpl = NcgImpl {
cmmTopCodeGen = X86.CodeGen.cmmTopCodeGen
......@@ -160,13 +161,13 @@ nativeCodeGen dflags h us cmms
,shortcutStatics = X86.Instr.shortcutStatics
,shortcutJump = X86.Instr.shortcutJump
,pprNatCmmDecl = X86.Ppr.pprNatCmmDecl
,maxSpillSlots = X86.Instr.maxSpillSlots
,maxSpillSlots = X86.Instr.maxSpillSlots (target32Bit platform)
,allocatableRegs = X86.Regs.allocatableRegs
,ncg_x86fp_kludge = id
,ncgExpandTop = id
,ncgMakeFarBranches = id
}
in case platformArch $ targetPlatform dflags of
in case platformArch platform of
ArchX86 -> nCG' (x86NcgImpl { ncg_x86fp_kludge = map x86fp_kludge })
ArchX86_64 -> nCG' x86NcgImpl
ArchPPC ->
......
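The substance of this hunk is small but typical of the whole commit: targetPlatform dflags is bound once as platform, and choices that used to be fixed at GHC's own build time (the arch dispatch, maxSpillSlots) now consume that value. A toy with the same shape, every name illustrative rather than GHC's:

data Arch = ArchX86 | ArchX86_64 | ArchPPC deriving (Eq, Show)

data Ncg = Ncg
    { ncgName   :: String
    , wordBytes :: Int        -- word size the backend assumes
    }

pickNcg :: Arch -> Ncg
pickNcg arch =
    let is32Bit = arch == ArchX86        -- computed once, threaded everywhere
        x86Ncg  = Ncg { ncgName   = "x86"
                      , wordBytes = if is32Bit then 4 else 8 }
    in case arch of
         ArchX86    -> x86Ncg
         ArchX86_64 -> x86Ncg
         ArchPPC    -> Ncg { ncgName = "ppc", wordBytes = 4 }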
......@@ -11,24 +11,4 @@
#include "ghc_boot_platform.h"
#define COMMA ,
-- - - - - - - - - - - - - - - - - - - - - -
#if i386_TARGET_ARCH
# define IF_ARCH_i386(x,y) x
#else
# define IF_ARCH_i386(x,y) y
#endif
-- - - - - - - - - - - - - - - - - - - - - -
#if linux_TARGET_OS
# define IF_OS_linux(x,y) x
#else
# define IF_OS_linux(x,y) y
#endif
-- - - - - - - - - - - - - - - - - - - - - -
#if darwin_TARGET_OS
# define IF_OS_darwin(x,y) x
#else
# define IF_OS_darwin(x,y) y
#endif
#endif
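These selectors were resolved when GHC itself was compiled, which is precisely what stops one compiler binary from serving several targets; the rest of the commit replaces each use with an ordinary runtime test. The replacement pattern, sketched (ifLinux is a made-up helper; platformOS and OSLinux are assumed from GHC's Platform module of this era):

-- Before:  IF_OS_linux(x,y)   -- fixed forever at GHC's own build time
-- After:   an ordinary function of the target description
ifLinux :: Platform -> a -> a -> a
ifLinux platform x y
    | platformOS platform == OSLinux = x
    | otherwise                      = y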
......@@ -61,8 +61,8 @@ instance FR SPARC.FreeRegs where
maxSpillSlots :: Platform -> Int
maxSpillSlots platform
= case platformArch platform of
ArchX86 -> X86.Instr.maxSpillSlots
ArchX86_64 -> X86.Instr.maxSpillSlots
ArchX86 -> X86.Instr.maxSpillSlots True -- 32bit
ArchX86_64 -> X86.Instr.maxSpillSlots False -- not 32bit
ArchPPC -> PPC.Instr.maxSpillSlots
ArchSPARC -> SPARC.Instr.maxSpillSlots
ArchARM _ _ -> panic "maxSpillSlots ArchARM"
......
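A bare True/False at each call site is easy to get backwards, hence the comments above. A caller that already has a Platform can avoid spelling the literal at all, using target32Bit just as the nativeCodeGen hunk does (a sketch, not part of the patch):

x86MaxSpillSlots :: Platform -> Int
x86MaxSpillSlots platform = X86.Instr.maxSpillSlots (target32Bit platform)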
......@@ -161,7 +161,7 @@ stmtToInstrs stmt = do
size = cmmTypeSize ty
CmmCall target result_regs args _ _
-> genCCall target result_regs args
-> genCCall is32Bit target result_regs args
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
......@@ -418,8 +418,8 @@ getRegister' is32Bit (CmmReg reg)
-- on x86_64, we have %rip for PicBaseReg, but it's not
-- a full-featured register, it can only be used for
-- rip-relative addressing.
do reg' <- getPicBaseNat archWordSize
return (Fixed archWordSize reg' nilOL)
do reg' <- getPicBaseNat (archWordSize is32Bit)
return (Fixed (archWordSize is32Bit) reg' nilOL)
_ ->
do use_sse2 <- sse2Enabled
let
......@@ -636,15 +636,15 @@ getRegister' is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
return (swizzleRegisterRep e_code new_size)
getRegister' _ (CmmMachOp mop [x, y]) = do -- dyadic MachOps
getRegister' is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
sse2 <- sse2Enabled
case mop of
MO_F_Eq _ -> condFltReg EQQ x y
MO_F_Ne _ -> condFltReg NE x y
MO_F_Gt _ -> condFltReg GTT x y
MO_F_Ge _ -> condFltReg GE x y
MO_F_Lt _ -> condFltReg LTT x y
MO_F_Le _ -> condFltReg LE x y
MO_F_Eq _ -> condFltReg is32Bit EQQ x y
MO_F_Ne _ -> condFltReg is32Bit NE x y
MO_F_Gt _ -> condFltReg is32Bit GTT x y
MO_F_Ge _ -> condFltReg is32Bit GE x y
MO_F_Lt _ -> condFltReg is32Bit LTT x y
MO_F_Le _ -> condFltReg is32Bit LE x y
MO_Eq _ -> condIntReg EQQ x y
MO_Ne _ -> condIntReg NE x y
......@@ -846,12 +846,15 @@ getRegister' is32Bit (CmmLoad mem pk)
return (Any size code)
where size = intSize $ typeWidth pk
getRegister' _ (CmmLit (CmmInt 0 width))
getRegister' is32Bit (CmmLit (CmmInt 0 width))
= let
size = intSize width
-- x86_64: 32-bit xor is one byte shorter, and zero-extends to 64 bits
size1 = IF_ARCH_i386( size, case size of II64 -> II32; _ -> size )
size1 = if is32Bit then size
else case size of
II64 -> II32
_ -> size
code dst
= unitOL (XOR size1 (OpReg dst) (OpReg dst))
in
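The narrowing above encodes a standard x86-64 fact rather than anything patch-specific: writing a 32-bit register clears the upper 32 bits, and the 32-bit xor omits the REX.W prefix, so it is a byte shorter with the same effect. The same choice as size1, pulled out as a function (assuming the NCG's Size type with II32/II64 in scope):

-- xorl %eax,%eax (2 bytes) leaves rax == 0 just like
-- xorq %rax,%rax (3 bytes), so a zero literal never needs the 64-bit form.
xorZeroSize :: Bool -> Size -> Size
xorZeroSize is32Bit size
    | is32Bit      = size
    | II64 <- size = II32
    | otherwise    = size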
......@@ -1052,17 +1055,18 @@ getNonClobberedOperand (CmmLit lit) = do
else getNonClobberedOperand_generic (CmmLit lit)
getNonClobberedOperand (CmmLoad mem pk) = do
is32Bit <- is32BitPlatform
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2)
&& IF_ARCH_i386(not (isWord64 pk), True)
&& (if is32Bit then not (isWord64 pk) else True)
then do
Amode src mem_code <- getAmode mem
(src',save_code) <-
if (amodeCouldBeClobbered src)
then do
tmp <- getNewRegNat archWordSize
tmp <- getNewRegNat (archWordSize is32Bit)
return (AddrBaseIndex (EABaseReg tmp) EAIndexNone (ImmInt 0),
unitOL (LEA archWordSize (OpAddr src) (OpReg tmp)))
unitOL (LEA (archWordSize is32Bit) (OpAddr src) (OpReg tmp)))
else
return (src, nilOL)
return (OpAddr src', mem_code `appOL` save_code)
......@@ -1102,8 +1106,9 @@ getOperand (CmmLit lit) = do
else getOperand_generic (CmmLit lit)
getOperand (CmmLoad mem pk) = do
is32Bit <- is32BitPlatform
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
if (not (isFloatType pk) || use_sse2) && (if is32Bit then not (isWord64 pk) else True)
then do
Amode src mem_code <- getAmode mem
return (OpAddr src, mem_code)
......@@ -1163,8 +1168,9 @@ isSuitableFloatingPointLit _ = False
getRegOrMem :: CmmExpr -> NatM (Operand, InstrBlock)
getRegOrMem e@(CmmLoad mem pk) = do
is32Bit <- is32BitPlatform
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
if (not (isFloatType pk) || use_sse2) && (if is32Bit then not (isWord64 pk) else True)
then do
Amode src mem_code <- getAmode mem
return (OpAddr src, mem_code)
......@@ -1502,7 +1508,8 @@ genCondJump id bool = do
-- register allocator.
genCCall
:: CmmCallTarget -- function to call
:: Bool -- 32 bit platform?
-> CmmCallTarget -- function to call
-> [HintedCmmFormal] -- where to put the result
-> [HintedCmmActual] -- arguments (of mixed type)
-> NatM InstrBlock
......@@ -1512,9 +1519,10 @@ genCCall
-- Unroll memcpy calls if the source and destination pointers are at
-- least DWORD aligned and the number of bytes to copy isn't too
-- large. Otherwise, call C's memcpy.
genCCall (CmmPrim MO_Memcpy) _ [CmmHinted dst _, CmmHinted src _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
genCCall is32Bit (CmmPrim MO_Memcpy) _
[CmmHinted dst _, CmmHinted src _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
| n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
......@@ -1524,7 +1532,7 @@ genCCall (CmmPrim MO_Memcpy) _ [CmmHinted dst _, CmmHinted src _,
return $ code_dst dst_r `appOL` code_src src_r `appOL`
go dst_r src_r tmp_r n
where
size = if align .&. 4 /= 0 then II32 else archWordSize
size = if align .&. 4 /= 0 then II32 else (archWordSize is32Bit)
sizeBytes = fromIntegral (sizeInBytes size)
......@@ -1554,10 +1562,11 @@ genCCall (CmmPrim MO_Memcpy) _ [CmmHinted dst _, CmmHinted src _,
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
genCCall (CmmPrim MO_Memset) _ [CmmHinted dst _,
CmmHinted (CmmLit (CmmInt c _)) _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
genCCall _ (CmmPrim MO_Memset) _
[CmmHinted dst _,
CmmHinted (CmmLit (CmmInt c _)) _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
| n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
......@@ -1592,11 +1601,11 @@ genCCall (CmmPrim MO_Memset) _ [CmmHinted dst _,
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
genCCall (CmmPrim MO_WriteBarrier) _ _ = return nilOL
genCCall _ (CmmPrim MO_WriteBarrier) _ _ = return nilOL
-- write barrier compiles to no code on x86/x86-64;
-- we keep it this long in order to prevent earlier optimisations.
genCCall (CmmPrim (MO_PopCnt width)) dest_regs@[CmmHinted dst _]
genCCall is32Bit (CmmPrim (MO_PopCnt width)) dest_regs@[CmmHinted dst _]
args@[CmmHinted src _] = do
sse4_2 <- sse4_2Enabled
if sse4_2
......@@ -1616,16 +1625,14 @@ genCCall (CmmPrim (MO_PopCnt width)) dest_regs@[CmmHinted dst _]
targetExpr <- cmmMakeDynamicReference dflags addImportNat
CallReference lbl
let target = CmmCallee targetExpr CCallConv
genCCall target dest_regs args
genCCall is32Bit target dest_regs args
where
size = intSize width
lbl = mkCmmCodeLabel primPackageId (fsLit (popCntLabel width))
genCCall target dest_regs args =
do is32Bit <- is32BitPlatform
if is32Bit
then genCCall32 target dest_regs args
else genCCall64 target dest_regs args
genCCall is32Bit target dest_regs args
| is32Bit = genCCall32 target dest_regs args
| otherwise = genCCall64 target dest_regs args
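Worth noting why the flag became an argument instead of an is32BitPlatform query in each equation: the special cases above want it inside pure where-bindings (size, sizeBytes), where a NatM action cannot appear. A toy showing the same constraint, names invented:

wordBytes :: Bool -> Int
wordBytes is32Bit = if is32Bit then 4 else 8

-- 'sz' lives in a pure 'where'; only a plain argument can reach it.
copyChunks :: Bool -> Int -> [Int]
copyChunks is32Bit n = replicate (n `div` sz) sz
    where sz = wordBytes is32Bit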
genCCall32 :: CmmCallTarget -- function to call
-> [HintedCmmFormal] -- where to put the result
......@@ -2144,8 +2151,8 @@ condIntReg cond x y = do
condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
condFltReg cond x y = if_sse2 condFltReg_sse2 condFltReg_x87
condFltReg :: Bool -> Cond -> CmmExpr -> CmmExpr -> NatM Register
condFltReg is32Bit cond x y = if_sse2 condFltReg_sse2 condFltReg_x87
where
condFltReg_x87 = do
CondCode _ cond cond_code <- condFltCode cond x y
......@@ -2160,8 +2167,8 @@ condFltReg cond x y = if_sse2 condFltReg_sse2 condFltReg_x87
condFltReg_sse2 = do
CondCode _ cond cond_code <- condFltCode cond x y
tmp1 <- getNewRegNat archWordSize
tmp2 <- getNewRegNat archWordSize
tmp1 <- getNewRegNat (archWordSize is32Bit)
tmp2 <- getNewRegNat (archWordSize is32Bit)
let
-- We have to worry about unordered operands (eg. comparisons
-- against NaN). If the operands are unordered, the comparison
......
......@@ -9,7 +9,10 @@
#include "HsVersions.h"
#include "nativeGen/NCG.h"
module X86.Instr
module X86.Instr (Instr(..), Operand(..),
getJumpDestBlockId, canShortcut, shortcutStatics,
shortcutJump, i386_insert_ffrees,
maxSpillSlots, archWordSize)
where
import X86.Cond
......@@ -33,16 +36,12 @@ import CLabel
import UniqSet
import Unique
-- Size of a PPC memory address, in bytes.
-- Size of an x86/x86_64 memory address, in bytes.
--
archWordSize :: Size
#if i386_TARGET_ARCH
archWordSize = II32
#elif x86_64_TARGET_ARCH
archWordSize = II64
#else
archWordSize = panic "X86.Instr.archWordSize: not defined"
#endif
archWordSize :: Bool -> Size
archWordSize is32Bit
| is32Bit = II32
| otherwise = II64
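Call sites either already hold the flag or recover it from a Platform, as the pretty-printer changes further down do. A usage sketch mirroring both, with target32Bit taken from the patch itself:

-- In the code generator, where is32Bit is threaded through:
--   tmp <- getNewRegNat (archWordSize is32Bit)
-- In a pretty-printer, where only a Platform is at hand:
wordSizeOf :: Platform -> Size
wordSizeOf platform = archWordSize (target32Bit platform)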
-- | Instruction instance for x86 instruction set.
instance Instruction Instr where
......@@ -617,16 +616,16 @@ x86_mkSpillInstr
-> Instr
x86_mkSpillInstr platform reg delta slot
= let off = spillSlotToOffset slot
= let off = spillSlotToOffset is32Bit slot
in
let off_w = (off-delta) `div` IF_ARCH_i386(4,8)
let off_w = (off - delta) `div` (if is32Bit then 4 else 8)
in case targetClassOfReg platform reg of
RcInteger -> MOV IF_ARCH_i386(II32,II64)
RcInteger -> MOV (archWordSize is32Bit)
(OpReg reg) (OpAddr (spRel platform off_w))
RcDouble -> GST FF80 reg (spRel platform off_w) {- RcFloat/RcDouble -}
RcDoubleSSE -> MOV FF64 (OpReg reg) (OpAddr (spRel platform off_w))
_ -> panic "X86.mkSpillInstr: no match"
where is32Bit = target32Bit platform
-- | Make a spill reload instruction.
x86_mkLoadInstr
......@@ -637,33 +636,35 @@ x86_mkLoadInstr
-> Instr
x86_mkLoadInstr platform reg delta slot
= let off = spillSlotToOffset slot
= let off = spillSlotToOffset is32Bit slot
in
let off_w = (off-delta) `div` IF_ARCH_i386(4,8)
let off_w = (off-delta) `div` (if is32Bit then 4 else 8)
in case targetClassOfReg platform reg of
RcInteger -> MOV IF_ARCH_i386(II32,II64)
RcInteger -> MOV (archWordSize is32Bit)
(OpAddr (spRel platform off_w)) (OpReg reg)
RcDouble -> GLD FF80 (spRel platform off_w) reg {- RcFloat/RcDouble -}
RcDoubleSSE -> MOV FF64 (OpAddr (spRel platform off_w)) (OpReg reg)
_ -> panic "X86.x86_mkLoadInstr"
where is32Bit = target32Bit platform
spillSlotSize :: Int
spillSlotSize = IF_ARCH_i386(12, 8)
spillSlotSize :: Bool -> Int
spillSlotSize is32Bit = if is32Bit then 12 else 8
maxSpillSlots :: Int
maxSpillSlots = ((rESERVED_C_STACK_BYTES - 64) `div` spillSlotSize) - 1
maxSpillSlots :: Bool -> Int
maxSpillSlots is32Bit
= ((rESERVED_C_STACK_BYTES - 64) `div` spillSlotSize is32Bit) - 1
-- convert a spill slot number to a *byte* offset, with no sign:
-- decide on a per arch basis whether you are spilling above or below
-- the C stack pointer.
spillSlotToOffset :: Int -> Int
spillSlotToOffset slot
| slot >= 0 && slot < maxSpillSlots
= 64 + spillSlotSize * slot
spillSlotToOffset :: Bool -> Int -> Int
spillSlotToOffset is32Bit slot
| slot >= 0 && slot < maxSpillSlots is32Bit
= 64 + spillSlotSize is32Bit * slot
| otherwise
= pprPanic "spillSlotToOffset:"
( text "invalid spill location: " <> int slot
$$ text "maxSpillSlots: " <> int maxSpillSlots)
$$ text "maxSpillSlots: " <> int (maxSpillSlots is32Bit))
--------------------------------------------------------------------------------
......
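Worked numbers for the layout above, taking rESERVED_C_STACK_BYTES = 16384 purely for illustration (the real constant is configuration-dependent); the 12-byte 32-bit slot leaves room for an 80-bit x87 spill:

demoSlotSize :: Bool -> Int
demoSlotSize is32Bit = if is32Bit then 12 else 8   -- as spillSlotSize above

demoMaxSlots :: Bool -> Int
demoMaxSlots is32Bit = ((16384 - 64) `div` demoSlotSize is32Bit) - 1
-- demoMaxSlots True  == 1359
-- demoMaxSlots False == 2039
-- slot n sits at byte offset 64 + demoSlotSize is32Bit * n,
-- e.g. slot 2 on 32-bit -> 88.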
......@@ -345,7 +345,7 @@ pprAddr platform (AddrBaseIndex base index displacement)
= let
pp_disp = ppr_disp displacement
pp_off p = pp_disp <> char '(' <> p <> char ')'
pp_reg r = pprReg platform archWordSize r
pp_reg r = pprReg platform (archWordSize (target32Bit platform)) r
in
case (base, index) of
(EABaseNone, EAIndexNone) -> pp_disp
......@@ -513,7 +513,7 @@ pprInstr platform (MOVZxL sizes src dst) = pprSizeOpOpCoerce platform (sLit "mov
-- the remaining zero-extension to 64 bits is automatic, and the 32-bit
-- instruction is shorter.
pprInstr platform (MOVSxL sizes src dst) = pprSizeOpOpCoerce platform (sLit "movs") sizes archWordSize src dst
pprInstr platform (MOVSxL sizes src dst) = pprSizeOpOpCoerce platform (sLit "movs") sizes (archWordSize (target32Bit platform)) src dst
-- here we do some patching, since the physical registers are only set late
-- in the code generation.
......@@ -598,10 +598,10 @@ pprInstr platform (JXX cond blockid)
pprInstr platform (JXX_GBL cond imm) = pprCondInstr (sLit "j") cond (pprImm platform imm)
pprInstr platform (JMP (OpImm imm)) = (<>) (ptext (sLit "\tjmp ")) (pprImm platform imm)
pprInstr platform (JMP op) = (<>) (ptext (sLit "\tjmp *")) (pprOperand platform archWordSize op)
pprInstr platform (JMP op) = (<>) (ptext (sLit "\tjmp *")) (pprOperand platform (archWordSize (target32Bit platform)) op)
pprInstr platform (JMP_TBL op _ _ _) = pprInstr platform (JMP op)
pprInstr platform (CALL (Left imm) _) = (<>) (ptext (sLit "\tcall ")) (pprImm platform imm)
pprInstr platform (CALL (Right reg) _) = (<>) (ptext (sLit "\tcall *")) (pprReg platform archWordSize reg)
pprInstr platform (CALL (Right reg) _) = (<>) (ptext (sLit "\tcall *")) (pprReg platform (archWordSize (target32Bit platform)) reg)
pprInstr platform (IDIV sz op) = pprSizeOp platform (sLit "idiv") sz op
pprInstr platform (DIV sz op) = pprSizeOp platform (sLit "div") sz op
......@@ -1053,9 +1053,9 @@ pprRegReg :: Platform -> LitString -> Reg -> Reg -> Doc
pprRegReg platform name reg1 reg2
= hcat [
pprMnemonic_ name,
pprReg platform archWordSize reg1,
pprReg platform (archWordSize (target32Bit platform)) reg1,
comma,
pprReg platform archWordSize reg2
pprReg platform (archWordSize (target32Bit platform)) reg2
]
......@@ -1065,7 +1065,7 @@ pprSizeOpReg platform name size op1 reg2
pprMnemonic name size,
pprOperand platform size op1,
comma,
pprReg platform archWordSize reg2
pprReg platform (archWordSize (target32Bit platform)) reg2
]
pprCondRegReg :: Platform -> LitString -> Size -> Cond -> Reg -> Reg -> Doc
......
......@@ -791,9 +791,7 @@ mkStgRhs rhs_fvs srt binder_info rhs
then (if isNotTop toplev
then SingleEntry -- HA! Paydirt for "dem"
else
#ifdef DEBUG
trace "WARNING: SE CAFs unsupported, forcing UPD instead" $
#endif
(if debugIsOn then trace "WARNING: SE CAFs unsupported, forcing UPD instead" else id) $
Updatable)
else Updatable
-- For now we forbid SingleEntry CAFs; they tickle the
......
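The replacement idiom relies on debugIsOn being an ordinary Bool constant (from GHC's Util), so the expression typechecks in every build and folds to id when the flag is False. A stand-alone sketch with a local stand-in for the constant:

import Debug.Trace (trace)

debugIsOn :: Bool            -- stand-in for Util.debugIsOn
debugIsOn = False

warnIfDebug :: String -> a -> a
warnIfDebug msg = if debugIsOn then trace msg else id
-- usage, matching the hunk above:
--   warnIfDebug "WARNING: SE CAFs unsupported, forcing UPD instead" Updatable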
......@@ -39,9 +39,7 @@ module StgSyn (
pprStgBinding, pprStgBindings, pprStgBindingsWithSRTs
#ifdef DEBUG
, pprStgLVs
#endif
) where
#include "HsVersions.h"
......@@ -804,7 +802,6 @@ instance Outputable AltType where
\end{code}
\begin{code}
#ifdef DEBUG
pprStgLVs :: Outputable occ => GenStgLiveVars occ -> SDoc
pprStgLVs lvs
= getPprStyle $ \ sty ->
......@@ -812,7 +809,6 @@ pprStgLVs lvs
empty
else
hcat [text "{-lvs:", interpp'SP (uniqSetToList lvs), text "-}"]
#endif
\end{code}
\begin{code}
......
......@@ -531,17 +531,16 @@ zonkQuantifiedTyVar tv
-- It might be a skolem type variable,
-- for example from a user type signature
MetaTv _ _ref ->
#ifdef DEBUG
-- [Sept 04] Check for non-empty.
-- See note [Silly Type Synonym]
(readMutVar _ref >>= \cts ->
case cts of
Flexi -> return ()
Indirect ty -> WARN( True, ppr tv $$ ppr ty )
return ()) >>
#endif
skolemiseUnboundMetaTyVar tv vanillaSkolemTv
MetaTv _ ref ->
do when debugIsOn $ do
-- [Sept 04] Check for non-empty.
-- See note [Silly Type Synonym]
cts <- readMutVar ref
case cts of
Flexi -> return ()
Indirect ty -> WARN( True, ppr tv $$ ppr ty )
return ()
skolemiseUnboundMetaTyVar tv vanillaSkolemTv
_other -> pprPanic "zonkQuantifiedTyVar" (ppr tv) -- FlatSkol, RuntimeUnk
skolemiseUnboundMetaTyVar :: TcTyVar -> TcTyVarDetails -> TcM TyVar
......
......@@ -107,18 +107,16 @@ import MonadUtils
import VarSet
import Pair
import FastString
import StaticFlags
import Util
import HsBinds -- for TcEvBinds stuff
import Id
import TcRnTypes
import Data.IORef
import Control.Monad
import Data.IORef
import qualified Data.Map as Map
#ifdef DEBUG
import StaticFlags( opt_PprStyle_Debug )
import Control.Monad( when )
#endif
\end{code}
......@@ -585,12 +583,12 @@ runTcS context untouch tcs
; ty_binds <- TcM.readTcRef ty_binds_var
; mapM_ do_unification (varEnvElts ty_binds)
#ifdef DEBUG
; count <- TcM.readTcRef step_count
; when (opt_PprStyle_Debug && count > 0) $
TcM.debugDumpTcRn (ptext (sLit "Constraint solver steps =")
<+> int count <+> ppr context)
#endif
; when debugIsOn $ do {
count <- TcM.readTcRef step_count
; when (opt_PprStyle_Debug && count > 0) $
TcM.debugDumpTcRn (ptext (sLit "Constraint solver steps =")
<+> int count <+> ppr context)
}
-- And return
; ev_binds <- TcM.readTcRef evb_ref
; return (res, evBindMapBinds ev_binds) }
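Same family of change in monadic form: unlike the #ifdef version, the reporting code is now typechecked in every build, and when debugIsOn merely guards its execution. Reduced to a runnable sketch, with IO standing in for the TcS stack and the Bool for Util.debugIsOn:

import Control.Monad (when)

reportSteps :: Bool -> Int -> IO ()
reportSteps debugIsOn count =
    when (debugIsOn && count > 0) $
        putStrLn ("Constraint solver steps = " ++ show count)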
......@@ -727,12 +725,11 @@ setWantedTyBind tv ty
= do { ref <- getTcSTyBinds
; wrapTcS $
do { ty_binds <- TcM.readTcRef ref
#ifdef DEBUG
; TcM.checkErr (not (tv `elemVarEnv` ty_binds)) $
vcat [ text "TERRIBLE ERROR: double set of meta type variable"
, ppr tv <+> text ":=" <+> ppr ty
, text "Old value =" <+> ppr (lookupVarEnv_NF ty_binds tv)]
#endif
; when debugIsOn $
TcM.checkErr (not (tv `elemVarEnv` ty_binds)) $
vcat [ text "TERRIBLE ERROR: double set of meta type variable"
, ppr tv <+> text ":=" <+> ppr ty
, text "Old value =" <+> ppr (lookupVarEnv_NF ty_binds tv)]
; TcM.writeTcRef ref (extendVarEnv ty_binds tv (tv,ty)) } }
setIPBind :: EvVar -> EvTerm -> TcS ()
......
......@@ -226,9 +226,6 @@ kcTyClDecls1 decls
{ (kc_syn_decls, tcl_env) <- kcSynDecls (calcSynCycles syn_decls)
-- Now check for cyclic classes
; checkClassCycleErrs syn_decls alg_decls
; setLclEnv tcl_env $ do
{ kc_alg_decls <- mapM (wrapLocM kcTyClDecl) alg_decls
......@@ -988,16 +985,10 @@ Validity checking is done once the mutually-recursive knot has been
tied, so we can look at things freely.
\begin{code}
checkClassCycleErrs :: [LTyClDecl Name] -> [LTyClDecl Name] -> TcM ()
checkClassCycleErrs syn_decls alg_decls
| null cls_cycles
= return ()
| otherwise
= do { mapM_ recClsErr cls_cycles
; failM } -- Give up now, because later checkValidTyCl
-- will loop if the synonym is recursive
where
cls_cycles = calcClassCycles syn_decls alg_decls
checkClassCycleErrs :: Class -> TcM ()
checkClassCycleErrs cls
= unless (null cls_cycles) $ mapM_ recClsErr cls_cycles
where cls_cycles = calcClassCycles cls
checkValidTyCl :: TyClDecl Name -> TcM ()