/* -----------------------------------------------------------------------------
 * $Id: Storage.h,v 1.43 2002/03/26 10:43:15 simonmar Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * External Storage Manager Interface
 *
 * ---------------------------------------------------------------------------*/

#ifndef STORAGE_H
#define STORAGE_H

#include "Block.h"
#include "MBlock.h"
#include "BlockAlloc.h"
#include "StoragePriv.h"

#ifdef PROFILING
#include "LdvProfile.h"
#endif

/* -----------------------------------------------------------------------------
   Initialisation / De-initialisation
   -------------------------------------------------------------------------- */

/* One-time initialisation of the storage manager. */
extern void initStorage(void);
/* Corresponding de-initialisation at shutdown. */
extern void exitStorage(void);

/* -----------------------------------------------------------------------------
   Generic allocation

   StgPtr allocate(nat n)       Allocates a chunk of contiguous store
   				n words long, returning a pointer to
				the first word.  Always succeeds.

   StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
   				n words long, which is at a fixed
				address (won't be moved by GC).  
				Returns a pointer to the first word.
				Always succeeds.
				
				NOTE: the GC can't in general handle
				pinned objects, so allocatePinned()
				can only be used for ByteArrays at the
				moment.

				Don't forget to TICK_ALLOC_XXX(...)
				after calling allocate or
				allocatePinned, for the
				benefit of the ticky-ticky profiler.

   rtsBool doYouWantToGC(void)  Returns True if the storage manager is
   				ready to perform a GC, False otherwise.

   lnat  allocated_bytes(void)  Returns the number of bytes allocated
                                via allocate() since the last GC.
				Used in the reporting of statistics.

   SMP: allocate and doYouWantToGC can be used from STG code, they are
   surrounded by a mutex.
   -------------------------------------------------------------------------- */

62
63
64
65
66
67
extern StgPtr  allocate        ( nat n );  /* n contiguous words; see comment above */
extern StgPtr  allocatePinned  ( nat n );  /* as allocate, but never moved by GC */
extern lnat    allocated_bytes ( void );   /* bytes via allocate() since last GC */

static inline rtsBool
doYouWantToGC( void )
68
69
70
71
72
73
74
75
76
77
78
79
80
{
  return (alloc_blocks >= alloc_blocks_lim);
}

/* -----------------------------------------------------------------------------
   ExtendNursery(hp,hplim)      When hplim is reached, try to grab
   				some more allocation space.  Returns
				False if the allocation space is
				exhausted, and the application should
				call GarbageCollect().
  -------------------------------------------------------------------------- */

/* See the comment above: bump CurrentNursery->free past hp, then move
 * on to the next nursery block if there is one (re-opening the
 * nursery there), else report exhaustion with rtsFalse.
 * Stray scrape lines that had been absorbed into the macro's
 * backslash continuations have been removed.
 */
#define ExtendNursery(hp,hplim)			\
  (CurrentNursery->free = (P_)(hp)+1,		\
   CurrentNursery->link == NULL ? rtsFalse :	\
   (CurrentNursery = CurrentNursery->link,	\
    OpenNursery(hp,hplim),			\
    rtsTrue))

extern void PleaseStopAllocating(void);

/* -----------------------------------------------------------------------------
   Performing Garbage Collection

   GarbageCollect(get_roots)    Performs a garbage collection.  
				'get_roots' is called to find all the 
				roots that the system knows about.

   StgClosure 			Called by get_roots on each root.	
   MarkRoot(StgClosure *p)	Returns the new location of the root.
   -------------------------------------------------------------------------- */

100
extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
101
102
103
104

/* -----------------------------------------------------------------------------
   Generational garbage collection support

   recordMutable(StgPtr p)       Informs the garbage collector that a
				 previously immutable object has
				 become (permanently) mutable.  Used
				 by thawArray and similar.

   updateWithIndirection(p1,p2)  Updates the object at p1 with an
				 indirection pointing to p2.  This is
				 normally called for objects in an old
				 generation (>0) when they are updated.

   updateWithPermIndirection(p1,p2)  As above but uses a permanent indir.

   -------------------------------------------------------------------------- */

119
120
121
/*
 * Storage manager mutex: serialises access to the storage manager's
 * shared state on SMP builds.  On non-SMP builds the lock macros
 * compile away to nothing.
 */
#if defined(SMP)
extern Mutex sm_mutex;
#define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex)
#else
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
#endif

131
132
133
/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
 * kind of lock in the SMP case?
 */
134
135
136
137
138
static inline void
recordMutable(StgMutClosure *p)
{
  bdescr *bd;

139
#ifdef SMP
140
  ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
141
#else
142
  ASSERT(closure_MUTABLE(p));
143
#endif
144
145

  bd = Bdescr((P_)p);
146
147
148
  if (bd->gen_no > 0) {
    p->mut_link = generations[bd->gen_no].mut_list;
    generations[bd->gen_no].mut_list = p;
149
150
151
152
153
154
155
156
157
  }
}

/* Record an old-generation closure that may now contain pointers into
 * a younger generation, by linking it onto its generation's
 * mut_once_list.  Generation-0 objects need no recording.
 */
static inline void
recordOldToNewPtrs(StgMutClosure *p)
{
  bdescr *bd;
  
  bd = Bdescr((P_)p);
  if (bd->gen_no > 0) {
    p->mut_link = generations[bd->gen_no].mut_once_list;
    generations[bd->gen_no].mut_once_list = p;
  }
}
163

164
165
166
167
// @LDV profiling
// We zero out the slop when PROFILING is on.
// #ifndef DEBUG
#if !defined(DEBUG) && !defined(PROFILING)

/* Overwrite the closure at p1 with an indirection to p2.
 * Generation-0 closures get a plain IND; older closures get an
 * IND_OLDGEN and are linked onto their generation's mut_once_list
 * (skipped when p1 is a BLACKHOLE_BQ — presumably already recorded;
 * see the other branches of this #if for the same test).
 */
#define updateWithIndirection(info, p1, p2)				\
  {									\
    bdescr *bd;								\
									\
    bd = Bdescr((P_)p1);						\
    if (bd->gen_no == 0) {						\
      ((StgInd *)p1)->indirectee = p2;					\
      SET_INFO(p1,&stg_IND_info);					\
      TICK_UPD_NEW_IND();						\
    } else {								\
      ((StgIndOldGen *)p1)->indirectee = p2;				\
      if (info != &stg_BLACKHOLE_BQ_info) {				\
        ACQUIRE_SM_LOCK;					        \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;	\
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;	\
        RELEASE_SM_LOCK;					        \
      }									\
      SET_INFO(p1,&stg_IND_OLDGEN_info);				\
      TICK_UPD_OLD_IND();						\
    }									\
  }
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
#elif defined(PROFILING)
// @LDV profiling
// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in 
// which p1 resides.
//
// Note: 
//   After all, we do *NOT* need to call LDV_recordCreate() for both IND and 
//   IND_OLDGEN closures because they are inherently used. But, it corrupts
//   the invariants that every closure keeps its creation time in the profiling
//   field. So, we call LDV_recordCreate().

#define updateWithIndirection(info, p1, p2)				\
  {									\
    bdescr *bd;								\
									\
    LDV_recordDead_FILL_SLOP_DYNAMIC((p1));                             \
    bd = Bdescr((P_)p1);						\
    if (bd->gen_no == 0) {						\
      ((StgInd *)p1)->indirectee = p2;					\
      SET_INFO(p1,&stg_IND_info);					\
      LDV_recordCreate((p1));                                           \
      TICK_UPD_NEW_IND();						\
    } else {								\
      ((StgIndOldGen *)p1)->indirectee = p2;				\
      if (info != &stg_BLACKHOLE_BQ_info) {				\
        ACQUIRE_SM_LOCK;					        \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;	\
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;					        \
      }									\
      SET_INFO(p1,&stg_IND_OLDGEN_info);				\
      LDV_recordCreate((p1));                                           \
    }									\
  }

224
225
226
227
#else

/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 */
#define updateWithIndirection(info, p1, p2)				\
  {									\
    bdescr *bd;								\
									\
    ASSERT( p1 != p2 && !closure_IND(p1) );				\
    bd = Bdescr((P_)p1);						\
    if (bd->gen_no == 0) {						\
      ((StgInd *)p1)->indirectee = p2;					\
      SET_INFO(p1,&stg_IND_info);					\
      TICK_UPD_NEW_IND();						\
    } else {								\
      if (info != &stg_BLACKHOLE_BQ_info) {				\
	{								\
          StgInfoTable *inf = get_itbl(p1);				\
	  nat np = inf->layout.payload.ptrs,				\
	      nw = inf->layout.payload.nptrs, i;			\
          if (inf->type != THUNK_SELECTOR) {				\
             for (i = np; i < np + nw; i++) {				\
	        ((StgClosure *)p1)->payload[i] = 0;			\
             }								\
          }								\
        }								\
        ACQUIRE_SM_LOCK;					        \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;	\
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;	\
        RELEASE_SM_LOCK;					        \
      }									\
      ((StgIndOldGen *)p1)->indirectee = p2;				\
      SET_INFO(p1,&stg_IND_OLDGEN_info);				\
      TICK_UPD_OLD_IND();						\
    }									\
  }
#endif
267

268
269
270
271
/* Static objects all live in the oldest generation, so the closure is
 * linked onto oldest_gen->mut_once_list before being overwritten with
 * an IND_STATIC indirection to p2.
 */
#define updateWithStaticIndirection(info, p1, p2)			\
  {									\
    ASSERT( p1 != p2 && !closure_IND(p1) );				\
    ASSERT( ((StgMutClosure*)p1)->mut_link == NULL );			\
									\
    ACQUIRE_SM_LOCK;						        \
    ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list;	\
    oldest_gen->mut_once_list = (StgMutClosure *)p1;			\
    RELEASE_SM_LOCK;						        \
									\
    ((StgInd *)p1)->indirectee = p2;					\
    SET_INFO((StgInd *)p1, &stg_IND_STATIC_info);			\
    TICK_UPD_STATIC_IND();						\
  }

285
#if defined(TICKY_TICKY) || defined(PROFILING)
/* Variant of updateWithIndirection that installs a permanent
 * indirection (IND_PERM / IND_OLDGEN_PERM instead of the plain IND
 * infos).  Only compiled for ticky-ticky or profiling builds.
 */
static inline void
updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2) 
{
  bdescr *bd;

  ASSERT( p1 != p2 && !closure_IND(p1) );

#ifdef PROFILING
  // @LDV profiling
  // Destroy the old closure.
  // Nb: LDV_* stuff cannot mix with ticky-ticky
  LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
#endif
  bd = Bdescr((P_)p1);
  if (bd->gen_no == 0) {
    ((StgInd *)p1)->indirectee = p2;
    SET_INFO(p1,&stg_IND_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_NEW_PERM_IND(p1);
  } else {
    ((StgIndOldGen *)p1)->indirectee = p2;
    if (info != &stg_BLACKHOLE_BQ_info) {
      ACQUIRE_SM_LOCK;
      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
      RELEASE_SM_LOCK;
    }
    SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_OLD_PERM_IND();
  }
}
#endif

328
/* -----------------------------------------------------------------------------
   The CAF table - used to let us revert CAFs
   -------------------------------------------------------------------------- */

void revertCAFs( void );

#if defined(DEBUG)
/* Debug helpers: dump a generation's mutable lists. */
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
#endif /* DEBUG */
338

339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/* --------------------------------------------------------------------------
                      Address space layout macros
   --------------------------------------------------------------------------

   Here are the assumptions GHC makes about address space layout.
   Broadly, it thinks there are three sections:

     CODE    Read-only.  Contains code and read-only data (such as
                info tables)
             Also called "text"

     DATA    Read-write data.  Contains static closures (and on some
                architectures, info tables too)

     HEAP    Dynamically-allocated closures

     USER    None of the above.  The only way USER things arise right 
             now is when GHCi allocates a constructor info table, which
	     it does by mallocing them.

   Three macros identify these three areas:
     IS_DATA(p), HEAP_ALLOCED(p)

   HEAP_ALLOCED is called FOR EVERY SINGLE CLOSURE during GC.
   It needs to be FAST.

   Implementation of HEAP_ALLOCED
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   Concerning HEAP, most of the time (certainly under [Static] and [GHCi],
   we ensure that the heap is allocated above some fixed address HEAP_BASE
   (defined in MBlock.h).  In this case we set TEXT_BEFORE_HEAP, and we
   get a nice fast test.

   Sometimes we can't be quite sure.  For example in Windows, we can't 
   fix where our heap address space comes from.  In this case we un-set 
   TEXT_BEFORE_HEAP. That makes it more expensive to test whether a pointer
   comes from the HEAP section, because we need to look at the allocator's
   address maps (see HEAP_ALLOCED macro)

   Implementation of CODE and DATA
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   Concerning CODE and DATA, there are three main regimes:

     [Static] Totally      The segments are contiguous, and laid out 
     statically linked     exactly as above

     [GHCi] Static,        GHCi may load new modules, but it knows the
     except for GHCi       address map, so for any given address it can
                           still tell which section it belongs to

     [DLL] OS-supported    Chunks of CODE and DATA may be mixed in 
     dynamic loading       the address space, and we can't tell how


   For the [Static] case, we assume memory is laid out like this
   (in order of increasing addresses)

       Start of memory
           CODE section
       TEXT_SECTION_END_MARKER   (usually _etext)
           DATA section
       DATA_SECTION_END_MARKER   (usually _end)
           USER section
       HEAP_BASE
           HEAP section

   For the [GHCi] case, we have to consult GHCi's dynamic linker's
   address maps, which is done by macros
         is_dynamically_loaded_code_or_rodata_ptr
         is_dynamically_loaded_code_or_rwdata_ptr

   For the [DLL] case, IS_DATA is really not usable at all.
 */
412
413
414
415
416
417


/* TEXT_BEFORE_HEAP enables the fast HEAP_ALLOCED test below; it is
 * off only for mingw (see the comment above).
 */
#undef TEXT_BEFORE_HEAP
#ifndef mingw32_TARGET_OS
#define TEXT_BEFORE_HEAP 1
#endif

extern void* TEXT_SECTION_END_MARKER_DECL;
extern void* DATA_SECTION_END_MARKER_DECL;

/* Classify a pointer by the address-space section it falls in (see
 * the layout comment above).  Darwin uses the Mach-O section markers;
 * elsewhere we use the linker-provided end-of-section symbols.
 */
#ifdef darwin_TARGET_OS
extern unsigned long macho_etext;
extern unsigned long macho_edata;
#define IS_CODE_PTR(p) (  ((P_)(p) < (P_)macho_etext) \
                       || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
#define IS_DATA_PTR(p) ( ((P_)(p) >= (P_)macho_etext && \
                          (P_)(p) < (P_)macho_edata) \
                       || is_dynamically_loaded_rwdata_ptr((char *)p) )
#define IS_USER_PTR(p) ( ((P_)(p) >= (P_)macho_edata) \
                       && is_not_dynamically_loaded_ptr((char *)p) )
#else
/* Take into account code sections in dynamically loaded object files. */
#define IS_CODE_PTR(p) ( ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \
                       || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
#define IS_DATA_PTR(p) ( ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && \
                          (P_)(p) < (P_)&DATA_SECTION_END_MARKER) \
                       || is_dynamically_loaded_rwdata_ptr((char *)p) )
#define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \
                       && is_not_dynamically_loaded_ptr((char *)p) )
#endif
440
441
442

/* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
 * during GC.  It needs to be FAST.
 *
 * BEWARE: when we're dynamically loading code (for GHCi), make sure
 * that we don't load any code above HEAP_BASE, or this test won't work.
 */
#ifdef TEXT_BEFORE_HEAP
# define HEAP_ALLOCED(x)  ((StgPtr)(x) >= (StgPtr)(HEAP_BASE))
#else
/* mingw, really */
# define HEAP_ALLOCED(x)  (is_heap_alloced(x))
#endif

454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476

/* --------------------------------------------------------------------------
   Macros for distinguishing data pointers from code pointers
   --------------------------------------------------------------------------

  Specification
  ~~~~~~~~~~~~~
  The garbage collector needs to make some critical distinctions between pointers.
  In particular we need
 
     LOOKS_LIKE_GHC_INFO(p)          p points to an info table

  For both of these macros, p is
      *either* a pointer to a closure (static or heap allocated)
      *or* a return address on the (Haskell) stack

  (Return addresses are in fact info-pointers, so that the Haskell stack
  looks very like a chunk of heap.)

  The garbage collector uses LOOKS_LIKE_GHC_INFO when walking the stack, as it
  walks over the "pending arguments" on its way to the next return address.
  It is called moderately often, but not as often as HEAP_ALLOCED

  ToDo: LOOKS_LIKE_GHC_INFO(p) does not return True when p points to a
  constructor info table allocated by GHCi.  We should really rename 
  LOOKS_LIKE_GHC_INFO to LOOKS_LIKE_GHC_RETURN_INFO.

  Implementation
  ~~~~~~~~~~~~~~
  LOOKS_LIKE_GHC_INFO is more complicated because of the need to distinguish 
  between static closures and info tables.  It's a known portability problem.
  We have three approaches:

  Plan A: Address-space partitioning.  
    keep static closures in the (single, contiguous) data segment: IS_DATA_PTR(p)

  Plan A can fail for two reasons:
    * In many environments (eg. dynamic loading),
      text and data aren't in a single contiguous range.  
    * When we compile through vanilla C (no mangling) we sometimes
      can't guarantee to put info tables in the text section.  This
      happens eg. on MacOS where the C compiler refuses to put const
      data in the text section if it has any code pointers in it
      (which info tables do *only* when we're compiling without
      TABLES_NEXT_TO_CODE).
    
  Hence, Plan B: (compile-via-C-with-mangling, or native code generation)
    Put a zero word before each static closure.
    When compiling to native code, or via C-with-mangling, info tables
    are laid out "backwards" from the address specified in the info pointer
    (the entry code goes forward from the info pointer).  Hence, the word
    before the one referenced the info pointer is part of the info table,
    and is guaranteed non-zero.

    For reasons nobody seems to fully understand, the statically-allocated tables
    of INTLIKE and CHARLIKE closures can't have this zero word, so we
    have to test separately for them.

    Plan B fails altogether for the compile-through-vanilla-C route, because
    info tables aren't laid out backwards.


  Hence, Plan C: (unregisterised, compile-through-vanilla-C route only)
    If we didn't manage to get info tables into the text section, then
    we can distinguish between a static closure pointer and an info
    pointer as follows:  the first word of an info table is a code pointer,
    and therefore in text space, whereas the first word of a closure pointer
    is an info pointer, and therefore not.  Shazam!
*/


525
526
527
528
529
530
531
/* When working with Win32 DLLs, static closures are identified by
   being prefixed with a zero word. This is needed so that we can
   distinguish between pointers to static closures and (reversed!)
   info tables.

   This 'scheme' breaks down for closure tables such as CHARLIKE,
   so we catch these separately.
  
   LOOKS_LIKE_STATIC_CLOSURE() 
       - discriminates between static closures and info tbls
         (needed by LOOKS_LIKE_GHC_INFO() below - [Win32 DLLs only.])
   LOOKS_LIKE_STATIC() 
       - distinguishes between static and heap allocated data.
 */
#if defined(ENABLE_WIN32_DLL_SUPPORT)
            /* definitely do not enable for mingw DietHEP */
#define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r)))

/* Tiresome predicates needed to check for pointers into the closure tables */
#define IS_CHARLIKE_CLOSURE(p) \
    ( (P_)(p) >= (P_)stg_CHARLIKE_closure && \
      (char*)(p) <= ((char*)stg_CHARLIKE_closure + \
                     (MAX_CHARLIKE-MIN_CHARLIKE) * sizeof(StgIntCharlikeClosure)) )
#define IS_INTLIKE_CLOSURE(p) \
    ( (P_)(p) >= (P_)stg_INTLIKE_closure && \
      (char*)(p) <= ((char*)stg_INTLIKE_closure + \
                     (MAX_INTLIKE-MIN_INTLIKE) * sizeof(StgIntCharlikeClosure)) )

#define LOOKS_LIKE_STATIC_CLOSURE(r) (((*(((unsigned long *)(r))-1)) == 0) || IS_CHARLIKE_CLOSURE(r) || IS_INTLIKE_CLOSURE(r))

#elif defined(darwin_TARGET_OS) && !defined(TABLES_NEXT_TO_CODE)

/* Plan C (see above): anything in the data section that doesn't look
 * like an info table is taken to be a static closure. */
#define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r)))
#define LOOKS_LIKE_STATIC_CLOSURE(r) (IS_DATA_PTR(r) && !LOOKS_LIKE_GHC_INFO(r))

#else

/* Plan A: the data segment is contiguous, so a data pointer is a
 * static closure. */
#define LOOKS_LIKE_STATIC(r) IS_DATA_PTR(r)
#define LOOKS_LIKE_STATIC_CLOSURE(r) IS_DATA_PTR(r)

#endif


/* -----------------------------------------------------------------------------
   Macros for distinguishing infotables from closures.
   
   You'd think it'd be easy to tell an info pointer from a closure pointer:
   closures live on the heap and infotables are in read only memory.  Right?
   Wrong!  Static closures live in read only memory and Hugs allocates
   infotables for constructors on the (writable) C heap.
   -------------------------------------------------------------------------- */

/* not accurate by any means, but stops the assertions failing... */
/* TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO */
#define IS_HUGS_CONSTR_INFO(info)  IS_USER_PTR(info)
580
581
582
583

/* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but
 * Certainly not as often as HEAP_ALLOCED.
 */
#if defined(darwin_TARGET_OS) && !defined(TABLES_NEXT_TO_CODE)
	/* Plan C, see above */
/* Fixed: the cast yields a pointer, so member access must use ->
 * rather than . (the latter does not compile). */
#define LOOKS_LIKE_GHC_INFO(info) IS_CODE_PTR(((StgInfoTable *)info)->entry)
#else
#define LOOKS_LIKE_GHC_INFO(info) (!HEAP_ALLOCED(info) \
                                   && !LOOKS_LIKE_STATIC_CLOSURE(info))
#endif
591

592
593
594
595
/* -----------------------------------------------------------------------------
   Macros for calculating how big a closure will be (used during allocation)
   -------------------------------------------------------------------------- */

ken's avatar
ken committed
596
static __inline__ StgOffset AP_sizeW    ( nat n_args )              
597
598
{ return sizeofW(StgAP_UPD) + n_args; }

ken's avatar
ken committed
599
static __inline__ StgOffset PAP_sizeW   ( nat n_args )              
600
601
{ return sizeofW(StgPAP)    + n_args; }

ken's avatar
ken committed
602
static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )  
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
{ return sizeofW(StgHeader) + p + np; }

static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )                    
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }

static __inline__ StgOffset BLACKHOLE_sizeW ( void )                    
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }

/* --------------------------------------------------------------------------
 * Sizes of closures
 * ------------------------------------------------------------------------*/

/* Total size in words of the closure described by itbl: the fixed
 * header plus its pointer and non-pointer payload. */
static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl ) 
{
    StgOffset words = sizeofW(StgClosure);
    words += sizeofW(StgPtr)  * itbl->layout.payload.ptrs;
    words += sizeofW(StgWord) * itbl->layout.payload.nptrs;
    return words;
}

/* Size in words of the given PAP closure. */
static __inline__ StgOffset pap_sizeW( StgPAP* x )
{
    return PAP_sizeW(x->n_args);
}

/* Size in words of the given byte-array closure. */
static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
{
    return sizeofW(StgArrWords) + x->words;
}

/* Size in words of the given mutable pointer-array closure. */
static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
{
    return sizeofW(StgMutArrPtrs) + x->ptrs;
}

/* Size in words of the given TSO, including its stack. */
static __inline__ StgWord tso_sizeW ( StgTSO *tso )
{
    return TSO_STRUCT_SIZEW + tso->stack_size;
}

632
#endif /* STORAGE_H */
633