%
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1996
%
%********************************************************
%*							*
\section[CgCase]{Converting @StgCase@ expressions}
%*							*
%********************************************************

\begin{code}
#include "HsVersions.h"

13
module CgCase (	cgCase, saveVolatileVarsAndRegs ) where
14

15
16
IMP_Ubiq(){-uitous-}
IMPORT_DELOOPER(CgLoop2)		( cgExpr, getPrimOpArgAmodes )
17
18

import CgMonad
19
import StgSyn
20
21
import AbsCSyn

22
23
import AbsCUtils	( mkAbstractCs, mkAbsCStmts, mkAlgAltsCSwitch,
			  magicIdPrimRep, getAmodeRep
24
			)
25
26
27
28
29
30
import CgBindery	( getVolatileRegs, getArgAmode, getArgAmodes,
			  bindNewToReg, bindNewToTemp,
			  bindNewPrimToAmode,
			  rebindToAStack, rebindToBStack,
			  getCAddrModeAndInfo, getCAddrModeIfVolatile,
			  idInfoToAmode
31
32
			)
import CgCon		( buildDynCon, bindConArgs )
33
import CgHeapery	( heapCheck, yield )
34
35
36
37
38
39
import CgRetConv	( dataReturnConvAlg, dataReturnConvPrim,
			  ctrlReturnConvAlg,
			  DataReturnConvention(..), CtrlReturnConvention(..),
			  assignPrimOpResultRegs,
			  makePrimOpArgsRobust
			)
40
import CgStackery	( allocAStack, allocBStack, allocAStackTop, allocBStackTop )
41
import CgTailCall	( tailCallBusiness, performReturn )
42
43
import CgUsages		( getSpARelOffset, getSpBRelOffset, freeBStkSlot )
import CLabel		( mkVecTblLabel, mkReturnPtLabel, mkDefaultLabel,
44
			  mkAltLabel
45
46
			)
import ClosureInfo	( mkConLFInfo, mkLFArgument, layOutDynCon )
47
import CmdLineOpts	( opt_SccProfilingOn, opt_GranMacros )
48
import CostCentre	( useCurrentCostCentre )
49
import HeapOffs		( SYN_IE(VirtualSpBOffset), SYN_IE(VirtualHeapOffset) )
50
import Id		( idPrimRep, toplevelishId,
51
52
			  dataConTag, fIRST_TAG, SYN_IE(ConTag),
			  isDataCon, SYN_IE(DataCon),
53
			  idSetToList, GenId{-instance Uniquable,Eq-}
54
			)
55
56
57
import Maybes		( catMaybes )
import PprStyle		( PprStyle(..) )
import PprType		( GenType{-instance Outputable-} )
58
59
60
import PrimOp		( primOpCanTriggerGC, PrimOp(..),
			  primOpStackRequired, StackRequirement(..)
			)
61
62
63
64
65
import PrimRep		( getPrimRepSize, isFollowableRep, retPrimRepSize,
			  PrimRep(..)
			)
import TyCon		( isEnumerationTyCon )
import Type		( typePrimRep,
66
67
			  getAppSpecDataTyConExpandingDicts,
			  maybeAppSpecDataTyConExpandingDicts
68
69
70
71
			)
import Util		( sortLt, isIn, isn'tIn, zipEqual,
			  pprError, panic, assertPanic
			)
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
\end{code}

\begin{code}
data GCFlag
  = GCMayHappen	-- The scrutinee may involve GC, so everything must be
		-- tidy before the code for the scrutinee.

  | NoGC	-- The scrutinee is a primitive value, or a call to a
		-- primitive op which does no GC.  Hence the case can
		-- be done inline, without tidying up first.
\end{code}

It is quite interesting to decide whether to put a heap-check
at the start of each alternative.  Of course we certainly have
to do so if the case forces an evaluation, or if there is a primitive
op which can trigger GC.

A more interesting situation is this:

\begin{verbatim}
	!A!;
	...A...
	case x# of
	  0#      -> !B!; ...B...
	  default -> !C!; ...C...
\end{verbatim}

where \tr{!x!} indicates a possible heap-check point. The heap checks
in the alternatives {\em can} be omitted, in which case the topmost
heapcheck will take their worst case into account.

In favour of omitting \tr{!B!}, \tr{!C!}:

\begin{itemize}
\item
{\em May} save a heap overflow test,
	if ...A... allocates anything.  The other advantage
	of this is that we can use relative addressing
	from a single Hp to get at all the closures so allocated.
\item
 No need to save volatile vars etc across the case
\end{itemize}

Against:
\begin{itemize}
\item
   May do more allocation than reqd.  This sometimes bites us
	badly.  For example, nfib (ha!)  allocates about 30\% more space if the
	worst-casing is done, because many many calls to nfib are leaf calls
	which don't need to allocate anything.

	This never hurts us if there is only one alternative.
\end{itemize}


*** NOT YET DONE ***  The difficulty is that \tr{!B!}, \tr{!C!} need
to take account of what is live, and that includes all live volatile
variables, even if they also have stable analogues.  Furthermore, the
stack pointers must be lined up properly so that GC sees tidy stacks.
If these things are done, then the heap checks can be done at \tr{!B!} and
\tr{!C!} without a full save-volatile-vars sequence.

\begin{code}
136
137
138
cgCase	:: StgExpr
	-> StgLiveVars
	-> StgLiveVars
139
	-> Unique
140
	-> StgCaseAlts
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
	-> Code
\end{code}

Several special cases for primitive operations.

******* TO DO TO DO: fix what follows

Special case for

	case (op x1 ... xn) of
	  y -> e

where the type of the case scrutinee is a multi-constructor algebraic type.
Then we simply compile code for

	let y = op x1 ... xn
	in
	e

In this case:

	case (op x1 ... xn) of
	   C a b -> ...
	   y     -> e

where the type of the case scrutinee is a multi-constructor algebraic type.
we just bomb out at the moment. It never happens in practice.

**** END OF TO DO TO DO

\begin{code}
172
cgCase scrut@(StgPrim op args _) live_in_whole_case live_in_alts uniq
173
174
175
176
177
178
179
180
181
       (StgAlgAlts _ alts (StgBindDefault id _ deflt_rhs))
  = if not (null alts) then
    	panic "cgCase: case on PrimOp with default *and* alts\n"
	-- For now, die if alts are non-empty
    else
	cgExpr (StgLet (StgNonRec id scrut_rhs) deflt_rhs)
  where
    scrut_rhs       = StgRhsClosure useCurrentCostCentre stgArgOcc{-safe-} scrut_free_vars
				Updatable [] scrut
182
    scrut_free_vars = [ fv | StgVarArg fv <- args, not (toplevelishId fv) ]
183
184
185
186
187
			-- Hack, hack
\end{code}


\begin{code}
188
cgCase (StgPrim op args _) live_in_whole_case live_in_alts uniq alts
189
190
191
  | not (primOpCanTriggerGC op)
  =
	-- Get amodes for the arguments and results
192
    getPrimOpArgAmodes op args			`thenFC` \ arg_amodes ->
193
194
195
196
197
198
199
    let
	result_amodes = getPrimAppResultAmodes uniq alts
	liveness_mask = panic "cgCase: liveness of non-GC-ing primop touched\n"
    in
	-- Perform the operation
    getVolatileRegs live_in_alts		        `thenFC` \ vol_regs ->

200
201
    -- seq cannot happen here => no additional B Stack alloc

202
203
204
205
206
207
208
209
    absC (COpStmt result_amodes op
		 arg_amodes -- note: no liveness arg
		 liveness_mask vol_regs) 		`thenC`

	-- Scrutinise the result
    cgInlineAlts NoGC uniq alts

  | otherwise	-- *Can* trigger GC
210
  = getPrimOpArgAmodes op args	`thenFC` \ arg_amodes ->
211
212
213
214
215

   	-- Get amodes for the arguments and results, and assign to regs
	-- (Can-trigger-gc primops guarantee to have their (nonRobust)
	--  args in regs)
    let
216
	op_result_regs = assignPrimOpResultRegs op
217
218
219

    	op_result_amodes = map CReg op_result_regs

220
	(op_arg_amodes, liveness_mask, arg_assts)
221
	  = makePrimOpArgsRobust op arg_amodes
222
223
224
225
226
227
228
229
230
231
232
233

	liveness_arg  = mkIntCLit liveness_mask
    in
	-- Tidy up in case GC happens...

	-- Nota Bene the use of live_in_whole_case in nukeDeadBindings.
	-- Reason: the arg_assts computed above may refer to some stack slots
	-- which are not live in the alts.  So we mustn't use those slots
	-- to save volatile vars in!
    nukeDeadBindings live_in_whole_case	`thenC`
    saveVolatileVars live_in_alts	`thenFC` \ volatile_var_save_assts ->

234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
    -- Allocate stack words for the prim-op itself,
    -- these are guaranteed to be ON TOP OF the stack.
    -- Currently this is used *only* by the seq# primitive op.
    let 
      (a_req,b_req) = case (primOpStackRequired op) of
    			   NoStackRequired        -> (0, 0)
    			   FixedStackRequired a b -> (a, b)
    			   VariableStackRequired  -> (0, 0) -- i.e. don't care
    in
    allocAStackTop a_req 		`thenFC` \ a_slot ->
    allocBStackTop b_req 		`thenFC` \ b_slot ->

    getEndOfBlockInfo                 	`thenFC` \ eob_info@(EndOfBlockInfo args_spa args_spb sequel) ->
    -- a_req and b_req allocate stack space that is taken care of by the
    -- macros generated for the primops; thus, we there is no need to adjust
    -- this part of the stacks later on (=> +a_req in EndOfBlockInfo)
    -- currently all this is only used for SeqOp
    forkEval (if True {- a_req==0 && b_req==0 -}
                then eob_info
                else (EndOfBlockInfo (args_spa+a_req) 
	                             (args_spb+b_req) sequel)) nopC 
	     (
	      getAbsC (cgInlineAlts GCMayHappen uniq alts) `thenFC` \ abs_c ->
257
	      absC (CRetUnVector vtbl_label (CLabelledCode return_label abs_c))
258
    	    	    	    	    	`thenC`
259
260
	      returnFC (CaseAlts (CUnVecLbl return_label vtbl_label)
				 Nothing{-no semi-tagging-}))
261
262
263
264
265
	    `thenFC` \ new_eob_info ->

	-- Record the continuation info
    setEndOfBlockInfo new_eob_info (

266
	-- Now "return" to the inline alternatives; this will get
267
268
269
	-- compiled to a fall-through.
    let
	simultaneous_assts = arg_assts `mkAbsCStmts` volatile_var_save_assts
270

271
272
	-- do_op_and_continue will be passed an amode for the continuation
	do_op_and_continue sequel
273
	  = absC (COpStmt op_result_amodes
274
275
276
277
278
279
			  op
			  (pin_liveness op liveness_arg op_arg_amodes)
			  liveness_mask
			  [{-no vol_regs-}])
    	    	    	    	    	`thenC`

280
281
	    sequelToAmode sequel        `thenFC` \ dest_amode ->
	    absC (CReturn dest_amode DirectReturn)
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311

		-- Note: we CJump even for algebraic data types,
		-- because cgInlineAlts always generates code, never a
		-- vector.
    in
    performReturn simultaneous_assts do_op_and_continue live_in_alts
    )
  where
    -- for all PrimOps except ccalls, we pin the liveness info
    -- on as the first "argument"
    -- ToDo: un-duplicate?

    pin_liveness (CCallOp _ _ _ _ _) _ args = args
    pin_liveness other_op liveness_arg args
      = liveness_arg :args

    vtbl_label = mkVecTblLabel uniq
    return_label = mkReturnPtLabel uniq

\end{code}

Another special case: scrutinising a primitive-typed variable.	No
evaluation required.  We don't save volatile variables, nor do we do a
heap-check in the alternatives.	 Instead, the heap usage of the
alternatives is worst-cased and passed upstream.  This can result in
allocating more heap than strictly necessary, but it will sometimes
eliminate a heap check altogether.

\begin{code}
cgCase (StgApp v [] _) live_in_whole_case live_in_alts uniq (StgPrimAlts ty alts deflt)
312
  = getArgAmode v		`thenFC` \ amode ->
313
314
315
316
317
318
319
320
    cgPrimAltsGivenScrutinee NoGC amode alts deflt
\end{code}

Special case: scrutinising a non-primitive variable.
This can be done a little better than the general case, because
we can reuse/trim the stack slot holding the variable (if it is in one).

\begin{code}
321
322
cgCase (StgApp (StgVarArg fun) args _ {-lvs must be same as live_in_alts-})
	live_in_whole_case live_in_alts uniq alts@(StgAlgAlts _ _ _)
323
324
  =
    getCAddrModeAndInfo fun		`thenFC` \ (fun_amode, lf_info) ->
325
    getArgAmodes args			`thenFC` \ arg_amodes ->
326
327
328

	-- Squish the environment
    nukeDeadBindings live_in_alts	`thenC`
329
    saveVolatileVarsAndRegs live_in_alts
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
    	    	    	`thenFC` \ (save_assts, alts_eob_info, maybe_cc_slot) ->

    forkEval alts_eob_info
    	     nopC (cgEvalAlts maybe_cc_slot uniq alts) `thenFC` \ scrut_eob_info ->
    setEndOfBlockInfo scrut_eob_info  (
      tailCallBusiness fun fun_amode lf_info arg_amodes live_in_alts save_assts
    )

\end{code}

Finally, here is the general case.

\begin{code}
cgCase expr live_in_whole_case live_in_alts uniq alts
  =	-- Figure out what volatile variables to save
    nukeDeadBindings live_in_whole_case	`thenC`
    saveVolatileVarsAndRegs live_in_alts
    	    	    	`thenFC` \ (save_assts, alts_eob_info, maybe_cc_slot) ->

349
	-- Save those variables right now!
350
351
    absC save_assts 	    	    	`thenC`

352
    forkEval alts_eob_info
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
    	(nukeDeadBindings live_in_alts)
	(cgEvalAlts maybe_cc_slot uniq alts) `thenFC` \ scrut_eob_info ->

    setEndOfBlockInfo scrut_eob_info (cgExpr expr)
\end{code}

%************************************************************************
%*									*
\subsection[CgCase-primops]{Primitive applications}
%*									*
%************************************************************************

Get result amodes for a primitive operation, in the case where GC can't happen.
The  amodes are returned in canonical order, ready for the prim-op!

	Alg case: temporaries named as in the alternatives,
		  plus (CTemp u) for the tag (if needed)
	Prim case: (CTemp u)

This is all disgusting, because these amodes must be consistent with those
invented by CgAlgAlts.

\begin{code}
getPrimAppResultAmodes
	:: Unique
378
	-> StgCaseAlts
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
	-> [CAddrMode]
\end{code}

\begin{code}
-- If there's an StgBindDefault which does use the bound
-- variable, then we can only handle it if the type involved is
-- an enumeration type.   That's important in the case
-- of comparisions:
--
--	case x ># y of
--	  r -> f r
--
-- The only reason for the restriction to *enumeration* types is our
-- inability to invent suitable temporaries to hold the results;
-- Elaborating the CTemp addr mode to have a second uniq field
-- (which would simply count from 1) would solve the problem.
-- Anyway, cgInlineAlts is now capable of handling all cases;
-- it's only this function which is being wimpish.

getPrimAppResultAmodes uniq (StgAlgAlts ty alts (StgBindDefault _ True {- used -} _))
  | isEnumerationTyCon spec_tycon = [tag_amode]
  | otherwise		          = panic "getPrimAppResultAmodes: non-enumeration algebraic alternatives with default"
  where
    -- A temporary variable to hold the tag; this is unaffected by GC because
    -- the heap-checks in the branches occur after the switch
404
    tag_amode     = CTemp uniq IntRep
405
    (spec_tycon, _, _) = getAppSpecDataTyConExpandingDicts ty
406
407
408
409
410
411
412
413
414

getPrimAppResultAmodes uniq (StgAlgAlts ty alts other_default)
	-- Default is either StgNoDefault or StgBindDefault with unused binder
  = case alts of
	[_]	-> arg_amodes			-- No need for a tag
	other	-> tag_amode : arg_amodes
  where
    -- A temporary variable to hold the tag; this is unaffected by GC because
    -- the heap-checks in the branches occur after the switch
415
    tag_amode = CTemp uniq IntRep
416
417
418
419

    -- Sort alternatives into canonical order; there must be a complete
    -- set because there's no default case.
    sorted_alts = sortLt lt alts
420
    (con1,_,_,_) `lt` (con2,_,_,_) = dataConTag con1 < dataConTag con2
421
422
423
424
425
426

    arg_amodes :: [CAddrMode]

    -- Turn them into amodes
    arg_amodes = concat (map mk_amodes sorted_alts)
    mk_amodes (con, args, use_mask, rhs)
427
      = [ CTemp (uniqueOf arg) (idPrimRep arg) | arg <- args ]
428
429
430
431
432
433
434
\end{code}

The situation is simpler for primitive
results, because there is only one!

\begin{code}
getPrimAppResultAmodes uniq (StgPrimAlts ty _ _)
435
  = [CTemp uniq (typePrimRep ty)]
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
\end{code}


%************************************************************************
%*									*
\subsection[CgCase-alts]{Alternatives}
%*									*
%************************************************************************

@cgEvalAlts@ returns an addressing mode for a continuation for the
alternatives of a @case@, used in a context when there
is some evaluation to be done.

\begin{code}
cgEvalAlts :: Maybe VirtualSpBOffset	-- Offset of cost-centre to be restored, if any
	   -> Unique
452
	   -> StgCaseAlts
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
	   -> FCode Sequel		-- Any addr modes inside are guaranteed to be a label
					-- so that we can duplicate it without risk of
					-- duplicating code

cgEvalAlts cc_slot uniq (StgAlgAlts ty alts deflt)
  = 	-- Generate the instruction to restore cost centre, if any
    restoreCurrentCostCentre cc_slot 	`thenFC` \ cc_restore ->

	-- Generate sequel info for use downstream
	-- At the moment, we only do it if the type is vector-returnable.
	-- Reason: if not, then it costs extra to label the
	-- alternatives, because we'd get return code like:
	--
	--	switch TagReg { 0 : JMP(alt_1); 1 : JMP(alt_2) ..etc }
	--
	-- which is worse than having the alt code in the switch statement

    let
471
	(spec_tycon, _, _) = getAppSpecDataTyConExpandingDicts ty
472

473
	use_labelled_alts
474
475
476
477
478
479
480
481
	  = case ctrlReturnConvAlg spec_tycon of
	      VectoredReturn _ -> True
	      _	    	       -> False

	semi_tagged_stuff
    	  = if not use_labelled_alts then
		Nothing -- no semi-tagging info
	    else
482
		cgSemiTaggedAlts uniq alts deflt -- Just <something>
483
    in
484
    cgAlgAlts GCMayHappen uniq cc_restore use_labelled_alts ty alts deflt True
485
486
487
488
489
490
491
492
493
494
495
496
497
498
					`thenFC` \ (tagged_alt_absCs, deflt_absC) ->

    mkReturnVector uniq ty tagged_alt_absCs deflt_absC `thenFC` \ return_vec ->

    returnFC (CaseAlts return_vec semi_tagged_stuff)

cgEvalAlts cc_slot uniq (StgPrimAlts ty alts deflt)
  =	-- Generate the instruction to restore cost centre, if any
    restoreCurrentCostCentre cc_slot 			 `thenFC` \ cc_restore ->

	-- Generate the switch
    getAbsC (cgPrimAlts GCMayHappen uniq ty alts deflt)  `thenFC` \ abs_c ->

	-- Generate the labelled block, starting with restore-cost-centre
499
500
    absC (CRetUnVector vtbl_label
	 (CLabelledCode return_label (cc_restore `mkAbsCStmts` abs_c)))
501
502
503
504
505
506
507
508
509
510
511
    	    	    	    	    	    	    	 `thenC`
	-- Return an amode for the block
    returnFC (CaseAlts (CUnVecLbl return_label vtbl_label) Nothing{-no semi-tagging-})
  where
    vtbl_label = mkVecTblLabel uniq
    return_label = mkReturnPtLabel uniq
\end{code}


\begin{code}
cgInlineAlts :: GCFlag -> Unique
512
    	     -> StgCaseAlts
513
514
515
    	     -> Code
\end{code}

HWL comment on {\em GrAnSim\/}  (adding GRAN_YIELDs for context switch): If
we  do  an inlining of the  case  no separate  functions  for returning are
created, so we don't have to generate a GRAN_YIELD in that case.  This info
must be  propagated  to cgAlgAltRhs (where the  GRAN_YIELD  macro might  be
emitted). Hence, the new Bool arg to cgAlgAltRhs.
First case: algebraic case, exactly one alternative, no default.
In this case the primitive op will not have set a temporary to the
tag, so we shouldn't generate a switch statment.  Instead we just
do the right thing.

\begin{code}
cgInlineAlts gc_flag uniq (StgAlgAlts ty [alt@(con,args,use_mask,rhs)] StgNoDefault)
529
  = cgAlgAltRhs gc_flag con args use_mask rhs False{-no yield macro if alt gets inlined-}
530
531
532
533
534
535
536
537
\end{code}

Second case: algebraic case, several alternatives.
Tag is held in a temporary.

\begin{code}
cgInlineAlts gc_flag uniq (StgAlgAlts ty alts deflt)
  = cgAlgAlts gc_flag uniq AbsCNop{-restore_cc-} False{-no semi-tagging-}
538
539
		ty alts deflt
                False{-don't emit yield-}  `thenFC` \ (tagged_alts, deflt_c) ->
540
541
542
543
544
545

	-- Do the switch
    absC (mkAlgAltsCSwitch tag_amode tagged_alts deflt_c)
 where
    -- A temporary variable to hold the tag; this is unaffected by GC because
    -- the heap-checks in the branches occur after the switch
546
    tag_amode = CTemp uniq IntRep
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
\end{code}

Third (real) case: primitive result type.

\begin{code}
cgInlineAlts gc_flag uniq (StgPrimAlts ty alts deflt)
  = cgPrimAlts gc_flag uniq ty alts deflt
\end{code}


%************************************************************************
%*									*
\subsection[CgCase-alg-alts]{Algebraic alternatives}
%*									*
%************************************************************************

In @cgAlgAlts@, none of the binders in the alternatives are
assumed to be yet bound.

HWL comment on {\em GrAnSim\/} (adding GRAN_YIELDs for context switch): The
last   arg of  cgAlgAlts  indicates  if we  want  a context   switch at the
beginning of  each alternative. Normally we  want that. The  only exception
are inlined alternatives.
\begin{code}
cgAlgAlts :: GCFlag
	  -> Unique
	  -> AbstractC				-- Restore-cost-centre instruction
	  -> Bool				-- True <=> branches must be labelled
576
577
578
	  -> Type	    	    	    	-- From the case statement
	  -> [(Id, [Id], [Bool], StgExpr)]	-- The alternatives
	  -> StgCaseDefault		-- The default
579
          -> Bool                               -- Context switch at alts?
580
581
582
583
584
585
586
587
588
589
590
591
	  -> FCode ([(ConTag, AbstractC)],	-- The branches
		    AbstractC			-- The default case
	     )
\end{code}

The case with a default which has a binder is different.  We need to
pick all the constructors which aren't handled explicitly by an
alternative, and which return their results in registers, allocate
them explicitly in the heap, and jump to a join point for the default
case.

OLD:  All of this only works if a heap-check is required anyway, because
otherwise it isn't safe to allocate.

NEW (July 94): now false!  It should work regardless of gc_flag,
because of the extra_branches argument now added to forkAlts.

We put a heap-check at the join point, for the benefit of constructors
which don't need to do allocation. This means that ones which do need
to allocate may end up doing two heap-checks; but that's just too bad.
(We'd need two join labels otherwise.  ToDo.)

It's all pretty turgid anyway.

\begin{code}
cgAlgAlts gc_flag uniq restore_cc semi_tagging
	ty alts deflt@(StgBindDefault binder True{-used-} _)
607
        emit_yield{-should a yield macro be emitted?-}
608
  = let
609
	extra_branches :: [FCode (ConTag, AbstractC)]
610
	extra_branches = catMaybes (map mk_extra_branch default_cons)
611
612
613

	must_label_default = semi_tagging || not (null extra_branches)
    in
614
    forkAlts (map (cgAlgAlt gc_flag uniq restore_cc semi_tagging emit_yield) alts)
615
	     extra_branches
616
	     (cgAlgDefault  gc_flag uniq restore_cc must_label_default deflt emit_yield)
617
618
619
  where

    default_join_lbl = mkDefaultLabel uniq
620
    jump_instruction = CJump (CLbl default_join_lbl CodePtrRep)
621

622
    (spec_tycon, _, spec_cons) = getAppSpecDataTyConExpandingDicts ty
623
624
625
626

    alt_cons = [ con | (con,_,_,_) <- alts ]

    default_cons  = [ spec_con | spec_con <- spec_cons,	-- In this type
627
				 spec_con `not_elem` alt_cons ]	-- Not handled explicitly
628
629
630
631
632
633
634
635
	where
	  not_elem = isn'tIn "cgAlgAlts"

    -- (mk_extra_branch con) returns the a maybe for the extra branch for con.
    -- The "maybe" is because con may return in heap, in which case there is
    -- nothing to do. Otherwise, we have a special case for a nullary constructor,
    -- but in the general case we do an allocation and heap-check.

636
    mk_extra_branch :: DataCon -> (Maybe (FCode (ConTag, AbstractC)))
637

638
    mk_extra_branch con
639
      = ASSERT(isDataCon con)
640
	case dataReturnConvAlg con of
641
642
643
644
645
646
	  ReturnInHeap	  -> Nothing
	  ReturnInRegs rs -> Just (getAbsC (alloc_code rs) `thenFC` \ abs_c ->
				   returnFC (tag, abs_c)
				  )
      where
	lf_info		= mkConLFInfo con
647
	tag		= dataConTag con
648
649
650
651
652
653
654
655
656
657

	-- alloc_code generates code to allocate constructor con, whose args are
	-- in the arguments to alloc_code, assigning the result to Node.
	alloc_code :: [MagicId] -> Code

	alloc_code regs
	  = possibleHeapCheck gc_flag regs False (
		buildDynCon binder useCurrentCostCentre con
				(map CReg regs) (all zero_size regs)
						`thenFC` \ idinfo ->
658
		idInfoToAmode PtrRep idinfo	`thenFC` \ amode ->
659
660
661
662
663

		absC (CAssign (CReg node) amode) `thenC`
		absC jump_instruction
	    )
	  where
664
	    zero_size reg = getPrimRepSize (magicIdPrimRep reg) == 0
665
666
667
668
669
\end{code}

Now comes the general case

\begin{code}
670
cgAlgAlts gc_flag uniq restore_cc must_label_branches ty alts deflt
671
	{- The deflt is either StgNoDefault or a BindDefault which doesn't use the binder -}
672
673
674
          emit_yield{-should a yield macro be emitted?-}

  = forkAlts (map (cgAlgAlt gc_flag uniq restore_cc must_label_branches emit_yield) alts)
675
	     [{- No "extra branches" -}]
676
	     (cgAlgDefault gc_flag uniq restore_cc must_label_branches deflt emit_yield)
677
678
679
680
681
\end{code}

\begin{code}
cgAlgDefault :: GCFlag
	     -> Unique -> AbstractC -> Bool -- turgid state...
682
	     -> StgCaseDefault	    -- input
683
684
	     -> Bool
	     -> FCode AbstractC	    -- output
685
686

cgAlgDefault gc_flag uniq restore_cc must_label_branch
687
	     StgNoDefault _
688
689
690
691
  = returnFC AbsCNop

cgAlgDefault gc_flag uniq restore_cc must_label_branch
	     (StgBindDefault _ False{-binder not used-} rhs)
692
             emit_yield{-should a yield macro be emitted?-}
693
694

  = getAbsC (absC restore_cc `thenC`
695
696
697
698
699
700
701
	     let
		emit_gran_macros = opt_GranMacros
	     in
             (if emit_gran_macros && emit_yield 
                then yield [] False 
                else absC AbsCNop)                            `thenC`     
    -- liveness same as in possibleHeapCheck below
702
703
704
705
706
707
708
709
710
711
712
713
	     possibleHeapCheck gc_flag [] False (cgExpr rhs)) `thenFC` \ abs_c ->
    let
	final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
		    | otherwise	        = abs_c
    in
    returnFC final_abs_c
  where
    lbl = mkDefaultLabel uniq


cgAlgDefault gc_flag uniq restore_cc must_label_branch
	     (StgBindDefault binder True{-binder used-} rhs)
714
          emit_yield{-should a yield macro be emitted?-}
715
716
717
718
719

  = 	-- We have arranged that Node points to the thing, even
    	-- even if we return in registers
    bindNewToReg binder node mkLFArgument `thenC`
    getAbsC (absC restore_cc `thenC`
720
721
722
723
724
725
726
	     let
		emit_gran_macros = opt_GranMacros
	     in
             (if emit_gran_macros && emit_yield
                then yield [node] False
                else absC AbsCNop)                            `thenC`     
		-- liveness same as in possibleHeapCheck below
727
728
729
730
731
732
733
734
735
736
737
738
739
740
	     possibleHeapCheck gc_flag [node] False (cgExpr rhs)
	-- Node is live, but doesn't need to point at the thing itself;
	-- it's ok for Node to point to an indirection or FETCH_ME
	-- Hence no need to re-enter Node.
    )					`thenFC` \ abs_c ->

    let
	final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
		    | otherwise	        = abs_c
    in
    returnFC final_abs_c
  where
    lbl = mkDefaultLabel uniq

741
-- HWL comment on GrAnSim: GRAN_YIELDs needed; emitted in cgAlgAltRhs
742
743
744

cgAlgAlt :: GCFlag
	 -> Unique -> AbstractC -> Bool		-- turgid state
745
	 -> Bool                               -- Context switch at alts?
746
	 -> (Id, [Id], [Bool], StgExpr)
747
748
	 -> FCode (ConTag, AbstractC)

749
750
751
cgAlgAlt gc_flag uniq restore_cc must_label_branch 
         emit_yield{-should a yield macro be emitted?-}
         (con, args, use_mask, rhs)
752
  = getAbsC (absC restore_cc `thenC`
753
754
755
	     cgAlgAltRhs gc_flag con args use_mask rhs 
             emit_yield
            ) `thenFC` \ abs_c -> 
756
757
758
759
760
761
    let
	final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
		    | otherwise	        = abs_c
    in
    returnFC (tag, final_abs_c)
  where
762
    tag	= dataConTag con
763
764
    lbl = mkAltLabel uniq tag

765
766
767
768
769
770
771
772
cgAlgAltRhs :: GCFlag 
	    -> Id 
	    -> [Id] 
	    -> [Bool] 
	    -> StgExpr 
	    -> Bool              -- context switch?
	    -> Code
cgAlgAltRhs gc_flag con args use_mask rhs emit_yield
773
  = let
774
      (live_regs, node_reqd)
775
	= case (dataReturnConvAlg con) of
776
	    ReturnInHeap      -> ([],						  True)
777
	    ReturnInRegs regs -> ([reg | (reg,True) <- zipEqual "cgAlgAltRhs" regs use_mask], False)
778
779
780
781
782
				-- Pick the live registers using the use_mask
				-- Doing so is IMPORTANT, because with semi-tagging
				-- enabled only the live registers will have valid
				-- pointers in them.
    in
783
784
785
786
787
788
789
     let
	emit_gran_macros = opt_GranMacros
     in
    (if emit_gran_macros && emit_yield
      then yield live_regs node_reqd 
      else absC AbsCNop)                                    `thenC`     
    -- liveness same as in possibleHeapCheck below
790
791
    possibleHeapCheck gc_flag live_regs node_reqd (
    (case gc_flag of
792
	NoGC   	    -> mapFCs bindNewToTemp args `thenFC` \ _ ->
793
794
795
		       nopC
    	GCMayHappen -> bindConArgs con args
    )	`thenC`
796
    cgExpr rhs
797
798
799
800
801
802
803
804
805
806
807
808
809
    )
\end{code}

%************************************************************************
%*									*
\subsection[CgCase-semi-tagged-alts]{The code to deal with sem-tagging}
%*									*
%************************************************************************

Turgid-but-non-monadic code to conjure up the required info from
algebraic case alternatives for semi-tagging.

\begin{code}
810
cgSemiTaggedAlts :: Unique
811
812
		 -> [(Id, [Id], [Bool], StgExpr)]
		 -> GenStgCaseDefault Id Id
813
814
		 -> SemiTaggingStuff

815
816
cgSemiTaggedAlts uniq alts deflt
  = Just (map st_alt alts, st_deflt deflt)
817
818
819
820
821
822
823
824
825
  where
    st_deflt StgNoDefault = Nothing

    st_deflt (StgBindDefault binder binder_used _)
      = Just (if binder_used then Just binder else Nothing,
	      (CCallProfCtrMacro SLIT("RET_SEMI_BY_DEFAULT") [], -- ToDo: monadise?
	       mkDefaultLabel uniq)
	     )

826
827
    st_alt (con, args, use_mask, _)
      = case (dataReturnConvAlg con) of
828
829
830
831

	  ReturnInHeap ->
	    -- Ha!  Nothing to do; Node already points to the thing
	    (con_tag,
832
833
	     (CCallProfCtrMacro SLIT("RET_SEMI_IN_HEAP") -- ToDo: monadise?
			[mkIntCLit (length args)], -- how big the thing in the heap is
834
835
836
837
838
839
840
	     join_label)
	    )

	  ReturnInRegs regs ->
	    -- We have to load the live registers from the constructor
	    -- pointed to by Node.
	    let
841
		(_, regs_w_offsets) = layOutDynCon con magicIdPrimRep regs
842
843
844

		used_regs = selectByMask use_mask regs

845
		used_regs_w_offsets = [ ro | ro@(reg,offset) <- regs_w_offsets,
846
847
848
849
850
851
					     reg `is_elem` used_regs]

		is_elem = isIn "cgSemiTaggedAlts"
	    in
	    (con_tag,
	     (mkAbstractCs [
852
853
854
		CCallProfCtrMacro SLIT("RET_SEMI_IN_REGS")  -- ToDo: macroise?
			[mkIntCLit (length regs_w_offsets),
			 mkIntCLit (length used_regs_w_offsets)],
855
856
857
		CSimultaneous (mkAbstractCs (map move_to_reg used_regs_w_offsets))],
	      join_label))
      where
858
	con_tag	    = dataConTag con
859
860
861
862
	join_label  = mkAltLabel uniq con_tag

    move_to_reg :: (MagicId, VirtualHeapOffset {-from Node-}) -> AbstractC
    move_to_reg (reg, offset)
863
      = CAssign (CReg reg) (CVal (NodeRel offset) (magicIdPrimRep reg))
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
\end{code}

%************************************************************************
%*									*
\subsection[CgCase-prim-alts]{Primitive alternatives}
%*									*
%************************************************************************

@cgPrimAlts@ generates a suitable @CSwitch@ for dealing with the
alternatives of a primitive @case@, given an addressing mode for the
thing to scrutinise.  It also keeps track of the maximum stack depth
encountered down any branch.

As usual, no binders in the alternatives are yet bound.

\begin{code}
-- Generate a suitable @CSwitch@ for the alternatives of a primitive
-- @case@.  The scrutinee lives in a fresh temporary when no GC can
-- happen, or in the standard return register for its kind when it can.
cgPrimAlts :: GCFlag
           -> Unique
           -> Type
           -> [(Literal, StgExpr)]      -- Alternatives
           -> StgCaseDefault            -- Default
           -> Code

cgPrimAlts gc_flag uniq ty alts deflt
  = cgPrimAltsGivenScrutinee gc_flag scrutinee alts deflt
  where
    -- A temporary variable, or standard register, to hold the result
    scrutinee = case gc_flag of
                  NoGC        -> CTemp uniq kind
                  GCMayHappen -> CReg (dataReturnConvPrim kind)

    kind = typePrimRep ty


-- Compile each alternative and the default in forked contexts, then
-- emit a single switch on the given scrutinee address mode.
cgPrimAltsGivenScrutinee gc_flag scrutinee alts deflt
  = forkAlts branch_codes
             [{- No "extra branches" -}]
             deflt_code                 `thenFC` \ (alt_absCs, deflt_absC) ->
    absC (CSwitch scrutinee alt_absCs deflt_absC)
          -- CSwitch does sensible things with one or zero alternatives
  where
    branch_codes = map (cgPrimAlt gc_flag) alts
    deflt_code   = cgPrimDefault gc_flag scrutinee deflt


cgPrimAlt :: GCFlag
          -> (Literal, StgExpr)         -- The alternative
          -> FCode (Literal, AbstractC) -- Its compiled form

-- Compile one primitive alternative: the right-hand side is compiled
-- (wrapped in a heap check if GC may happen here) and paired with the
-- literal it matches.
cgPrimAlt gc_flag (lit, rhs)
  = getAbsC rhs_code     `thenFC` \ absC ->
    returnFC (lit, absC)
  where
    -- No registers need to survive the check, and Node is not required.
    rhs_code = possibleHeapCheck gc_flag [] False (cgExpr rhs)

cgPrimDefault :: GCFlag
              -> CAddrMode              -- Scrutinee
              -> StgCaseDefault
              -> FCode AbstractC

-- Compile the default branch of a primitive case.  Reaching
-- StgNoDefault here is unexpected and treated as a compiler bug.
cgPrimDefault gc_flag scrutinee StgNoDefault
  = panic "cgPrimDefault: No default in prim case"

cgPrimDefault gc_flag scrutinee (StgBindDefault _ False{-binder not used-} rhs)
  = getAbsC (possibleHeapCheck gc_flag [] False (cgExpr rhs))

cgPrimDefault gc_flag scrutinee (StgBindDefault binder True{-used-} rhs)
  = getAbsC (possibleHeapCheck gc_flag regs False rhs_code)
  where
    -- Keep Node alive across the heap check when the scrutinee is
    -- followable (pointer-like).
    regs = if isFollowableRep (getAmodeRep scrutinee) then
              [node] else []

    -- Bind the default binder to the scrutinee's address mode before
    -- compiling the right-hand side.
    rhs_code = bindNewPrimToAmode binder scrutinee `thenC`
               cgExpr rhs
\end{code}


%************************************************************************
%*									*
\subsection[CgCase-tidy]{Code for tidying up prior to an eval}
%*									*
%************************************************************************

\begin{code}
-- Save everything volatile prior to an eval: volatile variables, the
-- current cost centre (when profiling), and the return address.
saveVolatileVarsAndRegs
    :: StgLiveVars                    -- Vars which should be made safe
    -> FCode (AbstractC,              -- Assignments to do the saves
              EndOfBlockInfo,         -- New sequel, recording where the
                                      -- return address now is
              Maybe VirtualSpBOffset) -- Slot for current cost centre

saveVolatileVarsAndRegs vars
  = saveVolatileVars vars   `thenFC` \ var_saves ->
    saveCurrentCostCentre   `thenFC` \ (maybe_cc_slot, cc_save) ->
    saveReturnAddress       `thenFC` \ (new_eob_info, ret_save) ->
    returnFC (mkAbstractCs [var_saves, cc_save, ret_save],
              new_eob_info,
              maybe_cc_slot)


962
saveVolatileVars :: StgLiveVars     -- Vars which should be made safe
                 -> FCode AbstractC -- Assignments to do the saves

-- Walk the live variables; each one whose current address mode is
-- volatile is given a fresh stack slot (A stack for followable things,
-- B stack otherwise), rebound to that slot, and contributes an
-- assignment that stores its volatile value into the slot.
saveVolatileVars vars
  = save_em (idSetToList vars)
  where
    save_em [] = returnFC AbsCNop

    save_em (var:vars)
      = getCAddrModeIfVolatile var `thenFC` \ v ->
        case v of
            Nothing        -> save_em vars -- Non-volatile, so carry on

            Just vol_amode ->  -- Aha!  It's volatile
                               save_var var vol_amode   `thenFC` \ abs_c ->
                               save_em vars             `thenFC` \ abs_cs ->
                               returnFC (abs_c `mkAbsCStmts` abs_cs)

    save_var var vol_amode
      | isFollowableRep kind            -- pointers go on the A stack
      = allocAStack                     `thenFC` \ a_slot ->
        rebindToAStack var a_slot       `thenC`
        getSpARelOffset a_slot          `thenFC` \ spa_rel ->
        returnFC (CAssign (CVal spa_rel kind) vol_amode)
      | otherwise                       -- everything else on the B stack
      = allocBStack (getPrimRepSize kind) `thenFC` \ b_slot ->
        rebindToBStack var b_slot       `thenC`
        getSpBRelOffset b_slot          `thenFC` \ spb_rel ->
        returnFC (CAssign (CVal spb_rel kind) vol_amode)
      where
        kind = getAmodeRep vol_amode

saveReturnAddress :: FCode (EndOfBlockInfo, AbstractC)

-- Save the return address if it is volatile (i.e. currently in
-- RetReg), returning the updated end-of-block info and the save code.
saveReturnAddress
  = getEndOfBlockInfo   `thenFC` \ eob_info@(EndOfBlockInfo vA vB sequel) ->

      -- See if the return address is volatile
    case sequel of
      InRetReg ->       -- Yes, it's volatile: stash it on the B stack
                   allocBStack retPrimRepSize  `thenFC` \ b_slot ->
                   getSpBRelOffset b_slot      `thenFC` \ spb_rel ->

                   returnFC (EndOfBlockInfo vA vB (OnStack b_slot),
                             CAssign (CVal spb_rel RetRep) (CReg RetReg))

      UpdateCode _ ->   -- It's non-volatile all right, but we still need
                        -- to allocate a B-stack slot for it, *solely* to make
                        -- sure that update frames for different values do not
                        -- appear adjacent on the B stack.  This makes sure
                        -- that B-stack squeezing works ok.
                        -- See note below
                   allocBStack retPrimRepSize  `thenFC` \ b_slot ->
                   returnFC (eob_info, AbsCNop)

      other ->          -- No, it's non-volatile, so do nothing
                   returnFC (eob_info, AbsCNop)
\end{code}

Note about B-stack squeezing.  Consider the following:

	y = [...] \u [] -> ...
	x = [y]   \u [] -> case y of (a,b) -> a

The code for x will push an update frame, and then enter y.  The code
for y will push another update frame.  If the B-stack-squeezer then
wakes up, it will see two update frames right on top of each other,
and will combine them.  This is WRONG, of course, because x's value is
not the same as y's.

The fix implemented above makes sure that we allocate an (unused)
B-stack slot before entering y.  You can think of this as holding the
saved value of RetAddr, which (after pushing x's update frame) will be
some update code ptr.  The compiler is clever enough to load the
static update code ptr into RetAddr before entering y, but the slot
is still there to separate the update frames.

When we save the current cost centre (which is done for lexical
scoping), we allocate a free B-stack location, and return (a)~the
virtual offset of the location, to pass on to the alternatives, and
(b)~the assignment to do the save (just as for @saveVolatileVars@).

\begin{code}
1045
saveCurrentCostCentre ::
        FCode (Maybe VirtualSpBOffset,  -- Where we decide to store it
                                        --   Nothing if not lexical CCs
               AbstractC)               -- Assignment to save it
                                        --   AbsCNop if not lexical CCs

-- Only does real work when cost-centre profiling is on; otherwise
-- there is no slot and the save code is a no-op.
saveCurrentCostCentre
  = let
        doing_profiling = opt_SccProfilingOn
    in
    if not doing_profiling then
        returnFC (Nothing, AbsCNop)
    else
        allocBStack (getPrimRepSize CostCentreRep) `thenFC` \ b_slot ->
        getSpBRelOffset b_slot                     `thenFC` \ spb_rel ->
        returnFC (Just b_slot,
                  CAssign (CVal spb_rel CostCentreRep) (CReg CurCostCentre))

restoreCurrentCostCentre :: Maybe VirtualSpBOffset -> FCode AbstractC

-- Restore a cost centre previously stashed by saveCurrentCostCentre,
-- freeing its B-stack slot.  Nothing means no save was done.
restoreCurrentCostCentre Nothing
 = returnFC AbsCNop
restoreCurrentCostCentre (Just b_slot)
 = getSpBRelOffset b_slot   `thenFC` \ spb_rel ->
   freeBStkSlot b_slot      `thenC`
   returnFC (CCallProfCCMacro SLIT("RESTORE_CCC") [CVal spb_rel CostCentreRep])
    -- we use the RESTORE_CCC macro, rather than just
    -- assigning into CurCostCentre, in case RESTORE_CCC
    -- has some sanity-checking in it.
\end{code}


%************************************************************************
%*									*
\subsection[CgCase-return-vec]{Building a return vector}
%*									*
%************************************************************************

Build a return vector, and return a suitable label addressing
mode for it.

\begin{code}
-- Build a return vector for an algebraic case, and return a suitable
-- label addressing mode for it.
mkReturnVector :: Unique
               -> Type
               -> [(ConTag, AbstractC)] -- Branch codes
               -> AbstractC             -- Default case
               -> FCode CAddrMode

mkReturnVector uniq ty tagged_alt_absCs deflt_absC
  = let
     (return_vec_amode, vtbl_body) = case (ctrlReturnConvAlg spec_tycon) of {

      -- Unvectored: a single return point which switches on TagReg.
      UnvectoredReturn _ ->
        (CUnVecLbl ret_label vtbl_label,
         absC (CRetUnVector vtbl_label
                            (CLabelledCode ret_label
                                           (mkAlgAltsCSwitch (CReg TagReg)
                                                             tagged_alt_absCs
                                                             deflt_absC))));

      -- Vectored: one table entry per constructor tag; tags with no
      -- alternative fall through to the default code.
      VectoredReturn table_size ->
        (CLbl vtbl_label DataPtrRep,
         absC (CRetVector vtbl_label
                        -- must restore cc before each alt, if required
                          (map mk_vector_entry [fIRST_TAG .. (table_size+fIRST_TAG-1)])
                          deflt_absC))

-- Leave nops and comments in for now; they are eliminated
-- lazily as it's printed.
--                        (case (nonemptyAbsC deflt_absC) of
--                              Nothing  -> AbsCNop
--                              Just def -> def)

    } in
    vtbl_body                           `thenC`
    returnFC return_vec_amode
  where
    -- *must* be a real "data" type constructor
    (spec_tycon,_,_) = case (maybeAppSpecDataTyConExpandingDicts ty) of
              Just xx -> xx
              Nothing -> pprError "ERROR: can't generate code for polymorphic case;\nprobably a mis-use of `seq' or `par';\nthe User's Guide has more details.\nOffending type: " (ppr PprDebug ty)

    vtbl_label = mkVecTblLabel uniq
    ret_label  = mkReturnPtLabel uniq

    -- Look up the alternative for a given tag; at most one may match.
    mk_vector_entry :: ConTag -> Maybe CAddrMode
    mk_vector_entry tag
      = case [ absC | (t, absC) <- tagged_alt_absCs, t == tag ] of
             []     -> Nothing
             [absC] -> Just (CCode absC)
             _      -> panic "mkReturnVector: too many"
\end{code}

%************************************************************************
%*									*
\subsection[CgCase-utils]{Utilities for handling case expressions}
%*									*
%************************************************************************

@possibleHeapCheck@ tests a flag passed in to decide whether to
do a heap check or not.

\begin{code}
-- Wrap the given code in a heap check when GC may happen at this
-- point; otherwise pass it through untouched.
possibleHeapCheck :: GCFlag -> [MagicId] -> Bool -> Code -> Code

possibleHeapCheck NoGC        _    _         code = code
possibleHeapCheck GCMayHappen regs node_reqd code = heapCheck regs node_reqd code
\end{code}

Select a restricted set of registers based on a usage mask.

\begin{code}
-- Select the elements of a list whose corresponding mask entry is
-- True.  The mask and the list must have the same length; a mismatch
-- indicates a compiler bug and is reported with a descriptive error
-- rather than a bare pattern-match failure.
selectByMask :: [Bool] -> [a] -> [a]
selectByMask []         []     = []
selectByMask (True:ms)  (x:xs) = x : selectByMask ms xs
selectByMask (False:ms) (x:xs) = selectByMask ms xs
selectByMask _          _      = error "selectByMask: mask/list length mismatch"
\end{code}