Commit dfc51a46 authored by eir@cis.upenn.edu's avatar eir@cis.upenn.edu

Merge branch 'master' of git://git.haskell.org/ghc

parents bb9d53e3 d1683f0e
......@@ -848,7 +848,7 @@ AS_IF([test "$fp_num1" $2 "$fp_num2"], [$4], [$5])[]dnl
dnl
dnl Check for Happy and version. If we're building GHC, then we need
dnl at least Happy version 1.14. If there's no installed Happy, we look
dnl at least Happy version 1.19. If there's no installed Happy, we look
dnl for a happy source tree and point the build system at that instead.
dnl
AC_DEFUN([FPTOOLS_HAPPY],
......
......@@ -147,7 +147,7 @@ layout :: DynFlags
, [CmmBlock] -- [out] new blocks
)
layout dflags procpoints liveness entry entry_args final_stackmaps final_hwm blocks
layout dflags procpoints liveness entry entry_args final_stackmaps final_sp_high blocks
= go blocks init_stackmap entry_args []
where
(updfr, cont_info) = collectContInfo blocks
......@@ -204,14 +204,7 @@ layout dflags procpoints liveness entry entry_args final_stackmaps final_hwm blo
--
let middle_pre = blockToList $ foldl blockSnoc middle1 middle2
sp_high = final_hwm - entry_args
-- The stack check value is adjusted by the Sp offset on
-- entry to the proc, which is entry_args. We are
-- assuming that we only do a stack check at the
-- beginning of a proc, and we don't modify Sp before the
-- check.
final_blocks = manifestSp dflags final_stackmaps stack0 sp0 sp_high entry0
final_blocks = manifestSp dflags final_stackmaps stack0 sp0 final_sp_high entry0
middle_pre sp_off last1 fixup_blocks
acc_stackmaps' = mapUnion acc_stackmaps out
......@@ -780,24 +773,24 @@ areaToSp :: DynFlags -> ByteOff -> ByteOff -> (Area -> StackLoc) -> CmmExpr -> C
areaToSp dflags sp_old _sp_hwm area_off (CmmStackSlot area n) =
cmmOffset dflags (CmmReg spReg) (sp_old - area_off area - n)
areaToSp dflags _ sp_hwm _ (CmmLit CmmHighStackMark) = mkIntExpr dflags sp_hwm
areaToSp dflags _ _ _ (CmmMachOp (MO_U_Lt _) -- Note [null stack check]
areaToSp dflags _ _ _ (CmmMachOp (MO_U_Lt _) -- Note [Always false stack check]
[CmmMachOp (MO_Sub _)
[ CmmReg (CmmGlobal Sp)
, CmmLit (CmmInt 0 _)],
CmmReg (CmmGlobal SpLim)]) = zeroExpr dflags
[ CmmRegOff (CmmGlobal Sp) off
, CmmLit (CmmInt lit _)],
CmmReg (CmmGlobal SpLim)])
| fromIntegral off == lit = zeroExpr dflags
areaToSp _ _ _ _ other = other
-- -----------------------------------------------------------------------------
-- Note [null stack check]
-- Note [Always false stack check]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- If the high-water Sp is zero, then we end up with
-- We can optimise stack checks of the form
--
-- if (Sp - 0 < SpLim) then .. else ..
-- if ((Sp + x) - x < SpLim) then .. else ..
--
-- and possibly some dead code for the failure case. Optimising this
-- away depends on knowing that SpLim <= Sp, so it is really the job
-- where x is an integer offset. Optimising this away depends on knowing that
-- SpLim <= Sp, so it is really the job of the stack layout algorithm, hence we
-- do it now. This is also convenient because the sinking pass will later drop the
-- dead code.
optStackCheck :: CmmNode O C -> CmmNode O C
......@@ -1021,4 +1014,3 @@ insertReloads stackmap =
stackSlotRegs :: StackMap -> [(LocalReg, StackLoc)]
stackSlotRegs sm = eltsUFM (sm_regs sm)
......@@ -51,7 +51,7 @@ cmmMachOpFoldM _ op [CmmLit (CmmInt x rep)]
MO_S_Neg _ -> CmmLit (CmmInt (-x) rep)
MO_Not _ -> CmmLit (CmmInt (complement x) rep)
-- these are interesting: we must first narrow to the
-- these are interesting: we must first narrow to the
-- "from" type, in order to truncate to the correct size.
-- The final narrow/widen to the destination type
-- is implicit in the CmmLit.
......@@ -87,7 +87,7 @@ cmmMachOpFoldM dflags conv_outer [CmmMachOp conv_inner [x]]
| otherwise ->
Nothing
where
isIntConversion (MO_UU_Conv rep1 rep2)
isIntConversion (MO_UU_Conv rep1 rep2)
= Just (rep1,rep2,False)
isIntConversion (MO_SS_Conv rep1 rep2)
= Just (rep1,rep2,True)
......@@ -318,7 +318,7 @@ cmmMachOpFoldM dflags mop [x, (CmmLit (CmmInt n _))]
| Just p <- exactLog2 n ->
Just (cmmMachOpFold dflags (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)])
MO_S_Quot rep
| Just p <- exactLog2 n,
| Just p <- exactLog2 n,
CmmReg _ <- x -> -- We duplicate x below, hence require
-- it is a reg. FIXME: remove this restriction.
-- shift right is not the same as quot, because it rounds
......@@ -362,7 +362,7 @@ cmmMachOpFoldM _ _ _ = Nothing
-- This algorithm for determining the $\log_2$ of exact powers of 2 comes
-- from GCC. It requires bit manipulation primitives, and we use GHC
-- extensions. Tough.
--
--
-- Used to be in MachInstrs --SDM.
-- ToDo: remove use of unboxery --SDM.
......@@ -387,54 +387,6 @@ exactLog2 x_
pow2 x | x ==# _ILIT(1) = _ILIT(0)
| otherwise = _ILIT(1) +# pow2 (x `shiftR_FastInt` _ILIT(1))
-- -----------------------------------------------------------------------------
-- Loopify for C
{-
This is a simple pass that replaces tail-recursive functions like this:
fac() {
...
jump fac();
}
with this:
fac() {
L:
...
goto L;
}
the latter generates better C code, because the C compiler treats it
like a loop, and brings full loop optimisation to bear.
In my measurements this makes little or no difference to anything
except factorial, but what the hell.
-}
{-
cmmLoopifyForC :: DynFlags -> RawCmmDecl -> RawCmmDecl
-- XXX: revisit if we actually want to do this
-- cmmLoopifyForC p@(CmmProc Nothing _ _) = p -- only if there's an info table, ignore case alts
cmmLoopifyForC dflags (CmmProc infos entry_lbl live
(ListGraph blocks@(BasicBlock top_id _ : _))) =
-- pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
CmmProc infos entry_lbl live (ListGraph blocks')
where blocks' = [ BasicBlock id (map do_stmt stmts)
| BasicBlock id stmts <- blocks ]
do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
= CmmBranch top_id
do_stmt stmt = stmt
jump_lbl | tablesNextToCode dflags = toInfoLbl entry_lbl
| otherwise = entry_lbl
cmmLoopifyForC _ top = top
-}
-- -----------------------------------------------------------------------------
-- Utils
......@@ -449,4 +401,3 @@ isComparisonExpr _ = False
isPicReg :: CmmExpr -> Bool
isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True
isPicReg _ = False
......@@ -533,6 +533,27 @@ heapStackCheckGen stk_hwm mb_bytes
call <- mkCall generic_gc (GC, GC) [] [] updfr_sz []
do_checks stk_hwm False mb_bytes (call <*> mkBranch lretry)
-- Note [Single stack check]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- When compiling a function we can determine how much stack space it will
-- use. We therefore need to perform only a single stack check at the beginning
-- of a function to see if we have enough stack space. Instead of referring
-- directly to Sp - as we used to do in the past - the code generator uses
-- (old + 0) in the stack check. The stack layout phase turns (old + 0) into Sp.
--
-- The idea here is that, while we need to perform only one stack check for
-- each function, we could in theory place more stack checks later in the
-- function. They would be redundant, but not incorrect (in a sense that they
-- should not change program behaviour). We need to make sure however that a
-- stack check inserted after incrementing the stack pointer checks for a
-- correspondingly smaller amount of stack space. This would not be the case if the code
-- generator produced direct references to Sp. By referencing (old + 0) we make
-- sure that we always check for a correct amount of stack: when converting
-- (old + 0) to Sp the stack layout phase takes into account changes already
-- made to the stack pointer. The idea for this change came from observations made
-- while debugging #8275.
do_checks :: Maybe CmmExpr -- Should we check the stack?
-> Bool -- Should we check for preemption?
-> Maybe CmmExpr -- Heap headroom (bytes)
......@@ -547,11 +568,13 @@ do_checks mb_stk_hwm checkYield mb_alloc_lit do_gc = do
bump_hp = cmmOffsetExprB dflags (CmmReg hpReg) alloc_lit
-- Sp overflow if (Sp - CmmHighStack < SpLim)
-- Sp overflow if ((old + 0) - CmmHighStack < SpLim)
-- At the beginning of a function old + 0 = Sp
-- See Note [Single stack check]
sp_oflo sp_hwm =
CmmMachOp (mo_wordULt dflags)
[CmmMachOp (MO_Sub (typeWidth (cmmRegType dflags spReg)))
[CmmReg spReg, sp_hwm],
[CmmStackSlot Old 0, sp_hwm],
CmmReg spLimReg]
-- Hp overflow if (Hp > HpLim)
......
This diff is collapsed.
......@@ -852,7 +852,7 @@ checkSafeImports dflags tcg_env
(text $ "is imported both as a safe and unsafe import!"))
| otherwise
= return v1
-- easier interface to work with
checkSafe (_, _, False) = return Nothing
checkSafe (m, l, True ) = fst `fmap` hscCheckSafe' dflags m l
......@@ -879,7 +879,7 @@ hscGetSafe hsc_env m l = runHsc hsc_env $ do
let pkgs' | Just p <- self = p:pkgs
| otherwise = pkgs
return (good, pkgs')
-- | Is a module trusted? If not, throw or log errors depending on the type.
-- Return (regardless of trusted or not) if the trust type requires the modules
-- own package be trusted and a list of other packages required to be trusted
......@@ -963,7 +963,7 @@ hscCheckSafe' dflags m l = do
Just _ -> return iface
Nothing -> snd `fmap` (liftIO $ getModuleInterface hsc_env m)
return iface'
#else
#else
return iface
#endif
......@@ -1280,12 +1280,8 @@ tryNewCodeGen hsc_env this_mod data_tycons
| otherwise
= {-# SCC "cmmPipeline" #-}
let initTopSRT = initUs_ us emptySRT in
let run_pipeline topSRT cmmgroup = do
(topSRT, cmmgroup) <- cmmPipeline hsc_env topSRT cmmgroup
return (topSRT,cmmgroup)
let initTopSRT = initUs_ us emptySRT
run_pipeline = cmmPipeline hsc_env
in do topSRT <- Stream.mapAccumL run_pipeline initTopSRT ppr_stream1
Stream.yield (srtToData topSRT)
......@@ -1616,7 +1612,7 @@ hscCompileCoreExpr' hsc_env srcspan ds_expr
; prepd_expr <- corePrepExpr dflags hsc_env tidy_expr
{- Lint if necessary -}
; lintInteractiveExpr "hscCompileExpr" hsc_env prepd_expr
; lintInteractiveExpr "hscCompileExpr" hsc_env prepd_expr
{- Convert to BCOs -}
; bcos <- coreExprToBCOs dflags iNTERACTIVE prepd_expr
......@@ -1658,4 +1654,3 @@ showModuleIndex (i,n) = "[" ++ padded ++ " of " ++ n_str ++ "] "
n_str = show n
i_str = show i
padded = replicate (length n_str - length i_str) ' ' ++ i_str
......@@ -318,6 +318,7 @@ $tab+ { warn Opt_WarnTabs (text "Tab character") }
"[|" / { ifExtension thEnabled } { token ITopenExpQuote }
"[||" / { ifExtension thEnabled } { token ITopenTExpQuote }
"[e|" / { ifExtension thEnabled } { token ITopenExpQuote }
"[e||" / { ifExtension thEnabled } { token ITopenTExpQuote }
"[p|" / { ifExtension thEnabled } { token ITopenPatQuote }
"[d|" / { ifExtension thEnabled } { layout_token ITopenDecQuote }
"[t|" / { ifExtension thEnabled } { token ITopenTypQuote }
......
......@@ -2609,32 +2609,40 @@ type varaibles as well as term variables.
case (case e of ...) of
C t xs::[t] -> j t xs
Note [Join point abstaction]
Note [Join point abstraction]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we try to lift a primitive-typed something out
for let-binding-purposes, we will *caseify* it (!),
with potentially-disastrous strictness results. So
instead we turn it into a function: \v -> e
where v::State# RealWorld#. The value passed to this function
is realworld#, which generates (almost) no code.
There's a slight infelicity here: we pass the overall
case_bndr to all the join points if it's used in *any* RHS,
because we don't know its usage in each RHS separately
We used to say "&& isUnLiftedType rhs_ty'" here, but now
we make the join point into a function whenever used_bndrs'
is empty. This makes the join-point more CPR friendly.
Consider: let j = if .. then I# 3 else I# 4
in case .. of { A -> j; B -> j; C -> ... }
Now CPR doesn't w/w j because it's a thunk, so
that means that the enclosing function can't w/w either,
which is a lose. Here's the example that happened in practice:
kgmod :: Int -> Int -> Int
kgmod x y = if x > 0 && y < 0 || x < 0 && y > 0
then 78
else 5
Join points always have at least one value argument,
for several reasons
* If we try to lift a primitive-typed something out
for let-binding-purposes, we will *caseify* it (!),
with potentially-disastrous strictness results. So
instead we turn it into a function: \v -> e
where v::State# RealWorld#. The value passed to this function
is realworld#, which generates (almost) no code.
* CPR. We used to say "&& isUnLiftedType rhs_ty'" here, but now
we make the join point into a function whenever used_bndrs'
is empty. This makes the join-point more CPR friendly.
Consider: let j = if .. then I# 3 else I# 4
in case .. of { A -> j; B -> j; C -> ... }
Now CPR doesn't w/w j because it's a thunk, so
that means that the enclosing function can't w/w either,
which is a lose. Here's the example that happened in practice:
kgmod :: Int -> Int -> Int
kgmod x y = if x > 0 && y < 0 || x < 0 && y > 0
then 78
else 5
* Let-no-escape. We want a join point to turn into a let-no-escape
so that it is implemented as a jump, and one of the conditions
for LNE is that it's not updatable. In CoreToStg, see
Note [What is a non-escaping let]
* Floating. Since a join point will be entered once, no sharing is
gained by floating out, but something might be lost by doing
so because it might be allocated.
I have seen a case alternative like this:
True -> \v -> ...
......@@ -2643,6 +2651,11 @@ It's a bit silly to add the realWorld dummy arg in this case, making
True -> $j s
(the \v alone is enough to make CPR happy) but I think it's rare
There's a slight infelicity here: we pass the overall
case_bndr to all the join points if it's used in *any* RHS,
because we don't know its usage in each RHS separately
Note [Duplicating StrictArg]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The original plan had (where E is a big argument)
......
......@@ -1358,6 +1358,9 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
-- Notice that the dictionary bindings "..d1..d2.." must be generated
-- by the constraint solver, since the <context> may be
-- user-specified.
--
-- See also Note [Newtype deriving superclasses] in TcDeriv
-- for why we don't just coerce the superclass
= do { rep_d_stuff <- checkConstraints InstSkol tyvars dfun_ev_vars $
emitWanted ScOrigin rep_pred
......
......@@ -336,30 +336,62 @@ Interact with axioms
interactTopAdd :: [Xi] -> Xi -> [Pair Type]
interactTopAdd [s,t] r
| Just 0 <- mbZ = [ s === num 0, t === num 0 ]
| Just x <- mbX, Just z <- mbZ, Just y <- minus z x = [t === num y]
| Just y <- mbY, Just z <- mbZ, Just x <- minus z y = [s === num x]
| Just 0 <- mbZ = [ s === num 0, t === num 0 ] -- (s + t ~ 0) => (s ~ 0, t ~ 0)
| Just x <- mbX, Just z <- mbZ, Just y <- minus z x = [t === num y] -- (5 + t ~ 8) => (t ~ 3)
| Just y <- mbY, Just z <- mbZ, Just x <- minus z y = [s === num x] -- (s + 5 ~ 8) => (s ~ 3)
where
mbX = isNumLitTy s
mbY = isNumLitTy t
mbZ = isNumLitTy r
interactTopAdd _ _ = []
{-
Note [Weakened interaction rule for subtraction]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A simpler interaction here might be:
`s - t ~ r` --> `t + r ~ s`
This would enable us to reuse all the code for addition.
Unfortunately, this works a little too well at the moment.
Consider the following example:
0 - 5 ~ r --> 5 + r ~ 0 --> (5 = 0, r = 0)
This (correctly) spots that the constraint cannot be solved.
However, this may be a problem if the constraint did not
need to be solved in the first place! Consider the following example:
f :: Proxy (If (5 <=? 0) (0 - 5) (5 - 0)) -> Proxy 5
f = id
Currently, GHC is strict while evaluating functions, so this does not
work, because even though the `If` should evaluate to `5 - 0`, we
also evaluate the "then" branch which generates the constraint `0 - 5 ~ r`,
which fails.
So, for the time being, we only add an improvement when the RHS is a constant,
which happens to work OK for the moment, although clearly we need to do
something more general.
-}
interactTopSub :: [Xi] -> Xi -> [Pair Type]
interactTopSub [s,t] r
| Just 0 <- mbZ = [ s === t ]
| otherwise = [ t .+. r === s ]
| Just z <- mbZ = [ s === (num z .+. t) ] -- (s - t ~ 5) => (5 + t ~ s)
where
mbZ = isNumLitTy r
interactTopSub _ _ = []
interactTopMul :: [Xi] -> Xi -> [Pair Type]
interactTopMul [s,t] r
| Just 1 <- mbZ = [ s === num 1, t === num 1 ]
| Just x <- mbX, Just z <- mbZ, Just y <- divide z x = [t === num y]
| Just y <- mbY, Just z <- mbZ, Just x <- divide z y = [s === num x]
| Just 1 <- mbZ = [ s === num 1, t === num 1 ] -- (s * t ~ 1) => (s ~ 1, t ~ 1)
| Just x <- mbX, Just z <- mbZ, Just y <- divide z x = [t === num y] -- (3 * t ~ 15) => (t ~ 5)
| Just y <- mbY, Just z <- mbZ, Just x <- divide z y = [s === num x] -- (s * 3 ~ 15) => (s ~ 5)
where
mbX = isNumLitTy s
mbY = isNumLitTy t
......@@ -368,9 +400,9 @@ interactTopMul _ _ = []
interactTopExp :: [Xi] -> Xi -> [Pair Type]
interactTopExp [s,t] r
| Just 0 <- mbZ = [ s === num 0 ]
| Just x <- mbX, Just z <- mbZ, Just y <- logExact z x = [t === num y]
| Just y <- mbY, Just z <- mbZ, Just x <- rootExact z y = [s === num x]
| Just 0 <- mbZ = [ s === num 0 ] -- (s ^ t ~ 0) => (s ~ 0)
| Just x <- mbX, Just z <- mbZ, Just y <- logExact z x = [t === num y] -- (2 ^ t ~ 8) => (t ~ 3)
| Just y <- mbY, Just z <- mbZ, Just x <- rootExact z y = [s === num x] -- (s ^ 2 ~ 9) => (s ~ 3)
where
mbX = isNumLitTy s
mbY = isNumLitTy t
......@@ -379,7 +411,7 @@ interactTopExp _ _ = []
interactTopLeq :: [Xi] -> Xi -> [Pair Type]
interactTopLeq [s,t] r
| Just 0 <- mbY, Just True <- mbZ = [ s === num 0 ]
| Just 0 <- mbY, Just True <- mbZ = [ s === num 0 ] -- (s <= 0) => (s ~ 0)
where
mbY = isNumLitTy t
mbZ = isBoolLitTy r
......@@ -398,11 +430,11 @@ interactInertAdd [x1,y1] z1 [x2,y2] z2
where sameZ = eqType z1 z2
interactInertAdd _ _ _ _ = []
{- XXX: Should we add some rules here?
When `interactTopSub` sees `x - y ~ z`, it generates `z + y ~ x`.
Hopefully, this should interact further and generate all additional
needed facts that we might need. -}
interactInertSub :: [Xi] -> Xi -> [Xi] -> Xi -> [Pair Type]
interactInertSub [x1,y1] z1 [x2,y2] z2
| sameZ && eqType x1 x2 = [ y1 === y2 ]
| sameZ && eqType y1 y2 = [ x1 === x2 ]
where sameZ = eqType z1 z2
interactInertSub _ _ _ _ = []
interactInertMul :: [Xi] -> Xi -> [Xi] -> Xi -> [Pair Type]
......
--------------------------------------------------------------------------------
-- | Boolean formulas without negation (qunatifier free)
-- | Boolean formulas without quantifiers and without negation.
-- Such a formula consists of variables, conjunctions (and), and disjunctions (or).
--
-- This module is used to represent minimal complete definitions for classes.
--
{-# LANGUAGE DeriveDataTypeable, DeriveFunctor, DeriveFoldable,
DeriveTraversable #-}
......@@ -36,13 +39,16 @@ mkFalse, mkTrue :: BooleanFormula a
mkFalse = Or []
mkTrue = And []
-- Convert a Bool to a BooleanFormula
mkBool :: Bool -> BooleanFormula a
mkBool False = mkFalse
mkBool True = mkTrue
-- Make a conjunction, and try to simplify
mkAnd :: Eq a => [BooleanFormula a] -> BooleanFormula a
mkAnd = maybe mkFalse (mkAnd' . nub) . concatMapM fromAnd
where
-- See Note [Simplification of BooleanFormulas]
fromAnd :: BooleanFormula a -> Maybe [BooleanFormula a]
fromAnd (And xs) = Just xs
-- assume that xs are already simplified
......@@ -55,14 +61,50 @@ mkAnd = maybe mkFalse (mkAnd' . nub) . concatMapM fromAnd
mkOr :: Eq a => [BooleanFormula a] -> BooleanFormula a
mkOr = maybe mkTrue (mkOr' . nub) . concatMapM fromOr
where
-- See Note [Simplification of BooleanFormulas]
fromOr (Or xs) = Just xs
fromOr (And []) = Nothing
fromOr x = Just [x]
mkOr' [x] = x
mkOr' xs = Or xs
{-
Note [Simplification of BooleanFormulas]
~~~~~~~~~~~~~~~~~~~~~~
The smart constructors (`mkAnd` and `mkOr`) do some attempt to simplify expressions. In particular,
1. Collapsing nested ands and ors, so
`(mkAnd [x, And [y,z]]`
is represented as
`And [x,y,z]`
Implemented by `fromAnd`/`fromOr`
2. Collapsing trivial ands and ors, so
`mkAnd [x]` becomes just `x`.
Implemented by mkAnd' / mkOr'
3. Conjunction with false, disjunction with true is simplified, i.e.
`mkAnd [mkFalse,x]` becomes `mkFalse`.
4. Common subexpression elimination:
`mkAnd [x,x,y]` is reduced to just `mkAnd [x,y]`.
This simplification is not exhaustive, in the sense that it will not produce
the smallest possible equivalent expression. For example,
`Or [And [x,y], And [x]]` could be simplified to `And [x]`, but it currently
is not. A general simplifier would need to use something like BDDs.
The reason behind the (crude) simplifier is to make for more user friendly
error messages. E.g. for the code
> class Foo a where
> {-# MINIMAL bar, (foo, baq | foo, quux) #-}
> instance Foo Int where
> bar = ...
> baz = ...
> quux = ...
We don't show a ridiculous error message like
Implement () and (either (`foo' and ()) or (`foo' and ()))
-}
----------------------------------------------------------------------
-- Evaluation and simplificiation
-- Evaluation and simplification
----------------------------------------------------------------------
isFalse :: BooleanFormula a -> Bool
......@@ -117,6 +159,8 @@ x `implies` Or ys = any (x `implies`) ys
-- Pretty printing
----------------------------------------------------------------------
-- Pretty print a BooleanFormula,
-- using the arguments as pretty printers for Var, And and Or respectively
pprBooleanFormula' :: (Rational -> a -> SDoc)
-> (Rational -> [SDoc] -> SDoc)
-> (Rational -> [SDoc] -> SDoc)
......
......@@ -113,10 +113,8 @@
<listitem>
<para>
The LLVM backend now supports 128bit SIMD
operations. This is now exploited in both the
<literal>vector</literal> and <literal>dph</literal>
packages, exposing a high level interface.
The LLVM backend now supports 128- and 256-bit SIMD
operations.
TODO FIXME: reference.
</para>
......@@ -488,6 +486,20 @@
Template Haskell now supports annotation pragmas.
</para>
</listitem>
<listitem>
<para>
Typed Template Haskell expressions are now supported. See
<xref linkend="template-haskell"/> for more details.
</para>
</listitem>
<listitem>
<para>
Template Haskell declarations, types, patterns, and
<emphasis>untyped</emphasis> expressions are no longer
typechecked at all. This is a backwards-compatible change
since it allows strictly more programs to be typed.
</para>
</listitem>
</itemizedlist>
</sect3>
......
......@@ -7872,12 +7872,13 @@ Wiki page</ulink>.
<itemizedlist>
<listitem><para> an expression; the spliced expression must
have type <literal>Q Exp</literal></para></listitem>
<listitem><para> an type; the spliced expression must
have type <literal>Q Typ</literal></para></listitem>
<listitem><para> a list of top-level declarations; the spliced expression
<listitem><para> a pattern; the spliced pattern must
have type <literal>Q Pat</literal></para></listitem>
<listitem><para> a type; the spliced expression must
have type <literal>Q Type</literal></para></listitem>
<listitem><para> a list of declarations; the spliced expression
must have type <literal>Q [Dec]</literal></para></listitem>
</itemizedlist>
Note that pattern splices are not supported.
Inside a splice you can only call functions defined in imported modules,
not functions defined elsewhere in the same module.</para></listitem>
......@@ -7895,6 +7896,36 @@ Wiki page</ulink>.
the quotation has type <literal>Q Pat</literal>.</para></listitem>
</itemizedlist></para></listitem>
<listitem>
<para>
A <emphasis>typed</emphasis> expression splice is written
<literal>$$x</literal>, where <literal>x</literal> is an
identifier, or <literal>$$(...)</literal>, where the "..." is
an arbitrary expression.
</para>
<para>
A typed expression splice can occur in place of an
expression; the spliced expression must have type <literal>Q
(TExp a)</literal>
</para>
</listitem>
<listitem>
<para>
A <emphasis>typed</emphasis> expression quotation is written
as <literal>[|| ... ||]</literal>, or <literal>[e||
... ||]</literal>, where the "..." is an expression; if the
"..." expression has type <literal>a</literal>, then the
quotation has type <literal>Q (TExp a)</literal>.
</para>
<para>
Values of type <literal>TExp a</literal> may be converted to
values of type <literal>Exp</literal> using the function
<literal>unType :: TExp a -> Exp</literal>.
</para>
</listitem>
<listitem><para>
A quasi-quotation can appear in either a pattern context or an
expression context and is also written in Oxford brackets:
......@@ -7950,13 +7981,117 @@ h z = z-1
</programlisting>
This abbreviation makes top-level declaration splices quieter and less intimidating.
</para></listitem>
<listitem>
<para>
Binders are lexically scoped. For example, consider the
following code, where a value <literal>g</literal> of type
<literal>Bool -> Q Pat</literal> is in scope, having been
imported from another module
<programlisting>
y :: Int
y = 7
f :: Int -> Int -> Int
f n = \ $(g True) -> y+n
</programlisting>
The <literal>y</literal> in the right-hand side of
<literal>f</literal> refers to the top-level <literal>y =
7</literal>, even if the pattern splice <literal>$(g
True)</literal> also generates a binder <literal>y</literal>.
</para>
<para>
Note that a pattern quasiquoter <emphasis>may</emphasis>
generate binders that scope over the right-hand side of a
definition because these binders are in scope lexically. For
example, given a quasiquoter <literal>haskell</literal> that
parses Haskell, in the following code, the <literal>y</literal>
in the right-hand side of <literal>f</literal> refers to the
<literal>y</literal> bound by the <literal>haskell</literal>
pattern quasiquoter, <emphasis>not</emphasis> the top-level
<literal>y = 7</literal>.
<programlisting>
y :: Int
y = 7
f :: Int -> Int -> Int
f n = \ [haskell|y|] -> y+n
</programlisting>
</para>
</listitem>
<listitem>
<para>
The type environment seen by <literal>reify</literal> includes
all the top-level declaration up to the end of the immediately
preceding <emphasis>declaration group</emphasis>, but no more.
</para>
<para>
A <emphasis>declaration group</emphasis> is the group of
declarations created by a top-level declaration splice, plus
those following it, down to but not including the next top-level
declaration splice. The first declaration group in a module
includes all top-level definitions down to but not including the
first top-level declaration splice.
</para>
<para>
Concretely, consider the following code
<programlisting>
module M where
import ...
f x = x
$(th1 4)
h y = k y y $(blah1)
$(th2 10)
w z = $(blah2)
</programlisting>
In this example