Glasgow Haskell Compiler (GHC)
Commit cf403b50
Authored Mar 17, 2013 by ian@well-typed.com
Remove some directories that used to be used by GUM
This hasn't been used for some time
Parent: 0374cade
Showing 61 changed files with 0 additions and 27782 deletions (+0 -27782)
rts/parallel/0Hash.c                  +0  -320
rts/parallel/0Parallel.h              +0  -414
rts/parallel/0Unpack.c                +0  -440
rts/parallel/Dist.c                   +0  -117
rts/parallel/Dist.h                   +0  -20
rts/parallel/FetchMe.h                +0  -24
rts/parallel/FetchMe.hc               +0  -180
rts/parallel/Global.c                 +0  -1090
rts/parallel/GranSim.c                +0  -3015
rts/parallel/GranSimRts.h             +0  -268
rts/parallel/HLC.h                    +0  -63
rts/parallel/HLComms.c                +0  -1810
rts/parallel/LLC.h                    +0  -130
rts/parallel/LLComms.c                +0  -489
rts/parallel/PEOpCodes.h              +0  -58
rts/parallel/Pack.c                   +0  -4293
rts/parallel/ParInit.c                +0  -322
rts/parallel/ParInit.h                +0  -19
rts/parallel/ParTicky.c               +0  -450
rts/parallel/ParTicky.h               +0  -60
rts/parallel/ParTypes.h               +0  -38
rts/parallel/Parallel.c               +0  -1140
rts/parallel/ParallelDebug.c          +0  -1955
rts/parallel/ParallelDebug.h          +0  -79
rts/parallel/ParallelRts.h            +0  -253
rts/parallel/RBH.c                    +0  -337
rts/parallel/SysMan.c                 +0  -650
utils/parallel/AVG.pl                 +0  -108
utils/parallel/GrAnSim.el             +0  -432
utils/parallel/Makefile               +0  -53
utils/parallel/RTS2gran.pl            +0  -684
utils/parallel/SN.pl                  +0  -280
utils/parallel/SPLIT.pl               +0  -379
utils/parallel/avg-RTS.pl             +0  -15
utils/parallel/get_SN.pl              +0  -40
utils/parallel/ghc-fool-sort.pl       +0  -23
utils/parallel/ghc-unfool-sort.pl     +0  -16
utils/parallel/gp-ext-imp.pl          +0  -86
utils/parallel/gr2RTS.pl              +0  -138
utils/parallel/gr2ap.bash             +0  -124
utils/parallel/gr2gran.bash           +0  -113
utils/parallel/gr2java.pl             +0  -322
utils/parallel/gr2jv.bash             +0  -123
utils/parallel/gr2pe.pl               +0  -1434
utils/parallel/gr2ps.bash             +0  -169
utils/parallel/gr2qp.pl               +0  -329
utils/parallel/gran-extr.pl           +0  -2114
utils/parallel/grs2gr.pl              +0  -48
utils/parallel/par-aux.pl             +0  -89
utils/parallel/ps-scale-y.pl          +0  -188
utils/parallel/qp2ap.pl               +0  -495
utils/parallel/qp2ps.pl               +0  -988
utils/parallel/sn_filter.pl           +0  -92
utils/parallel/stats.pl               +0  -168
utils/parallel/template.pl            +0  -141
utils/parallel/tf.pl                  +0  -148
utils/stat2resid/Makefile             +0  -41
utils/stat2resid/parse-gcstats.prl    +0  -232
utils/stat2resid/prefix.txt           +0  -10
utils/stat2resid/process-gcstats.prl  +0  -45
utils/stat2resid/stat2resid.prl       +0  -81
rts/parallel/0Hash.c  deleted  100644 → 0
/*-----------------------------------------------------------------------------
 *
 * (c) The AQUA Project, Glasgow University, 1995-1998
 * (c) The GHC Team, 1999
 *
 * Dynamically expanding linear hash tables, as described in
 * Per-Åke Larson, ``Dynamic Hash Tables,'' CACM 31(4), April 1988,
 * pp. 446 -- 457.
 * -------------------------------------------------------------------------- */

/*
  Replaced with ghc/rts/Hash.c in the new RTS
*/
#if 0
#include "Rts.h"
#include "Hash.h"
#include "RtsUtils.h"
#define HSEGSIZE 1024 /* Size of a single hash table segment */
/* Also the minimum size of a hash table */
#define HDIRSIZE 1024 /* Size of the segment directory */
/* Maximum hash table size is HSEGSIZE * HDIRSIZE */
#define HLOAD 5 /* Maximum average load of a single hash bucket */
#define HCHUNK (1024 * sizeof(W_) / sizeof(HashList))
/* Number of HashList cells to allocate in one go */
/* Linked list of (key, data) pairs for separate chaining */
struct hashlist {
    StgWord key;
    void *data;
    struct hashlist *next;    /* Next cell in bucket chain (same hash value) */
};

typedef struct hashlist HashList;

struct hashtable {
    int split;                /* Next bucket to split when expanding */
    int max;                  /* Max bucket of smaller table */
    int mask1;                /* Mask for doing the mod of h_1 (smaller table) */
    int mask2;                /* Mask for doing the mod of h_2 (larger table) */
    int kcount;               /* Number of keys */
    int bcount;               /* Number of buckets */
    HashList **dir[HDIRSIZE]; /* Directory of segments */
};
/* -----------------------------------------------------------------------------
* Hash first using the smaller table. If the bucket is less than the
* next bucket to be split, re-hash using the larger table.
* -------------------------------------------------------------------------- */
static int
hash(HashTable *table, W_ key)
{
    int bucket;

    /* Strip the boring zero bits */
    key /= sizeof(StgWord);

    /* Mod the size of the hash table (a power of 2) */
    bucket = key & table->mask1;

    if (bucket < table->split) {
        /* Mod the size of the expanded hash table (also a power of 2) */
        bucket = key & table->mask2;
    }

    return bucket;
}
/* -----------------------------------------------------------------------------
* Allocate a new segment of the dynamically growing hash table.
* -------------------------------------------------------------------------- */
static void
allocSegment(HashTable *table, int segment)
{
    table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),
                                         "allocSegment");
}
/* -----------------------------------------------------------------------------
* Expand the larger hash table by one bucket, and split one bucket
* from the smaller table into two parts. Only the bucket referenced
* by @table->split@ is affected by the expansion.
* -------------------------------------------------------------------------- */
static void
expand(HashTable *table)
{
    int oldsegment;
    int oldindex;
    int newbucket;
    int newsegment;
    int newindex;
    HashList *hl;
    HashList *next;
    HashList *old, *new;

    if (table->split + table->max >= HDIRSIZE * HSEGSIZE)
        /* Wow! That's big. Too big, so don't expand. */
        return;

    /* Calculate indices of bucket to split */
    oldsegment = table->split / HSEGSIZE;
    oldindex = table->split % HSEGSIZE;

    newbucket = table->max + table->split;

    /* And the indices of the new bucket */
    newsegment = newbucket / HSEGSIZE;
    newindex = newbucket % HSEGSIZE;

    if (newindex == 0)
        allocSegment(table, newsegment);

    if (++table->split == table->max) {
        table->split = 0;
        table->max *= 2;
        table->mask1 = table->mask2;
        table->mask2 = table->mask2 << 1 | 1;
    }
    table->bcount++;

    /* Split the bucket, paying no attention to the original order */
    old = new = NULL;
    for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
        next = hl->next;
        if (hash(table, hl->key) == newbucket) {
            hl->next = new;
            new = hl;
        } else {
            hl->next = old;
            old = hl;
        }
    }
    table->dir[oldsegment][oldindex] = old;
    table->dir[newsegment][newindex] = new;

    return;
}
void *
lookupHashTable(HashTable *table, StgWord key)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;

    bucket = hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
        if (hl->key == key)
            return hl->data;

    /* It's not there */
    return NULL;
}
/* -----------------------------------------------------------------------------
* We allocate the hashlist cells in large chunks to cut down on malloc
* overhead. Although we keep a free list of hashlist cells, we make
* no effort to actually return the space to the malloc arena.
* -------------------------------------------------------------------------- */
static HashList *freeList = NULL;

static HashList *
allocHashList(void)
{
    HashList *hl, *p;

    if ((hl = freeList) != NULL) {
        freeList = hl->next;
    } else {
        hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");

        freeList = hl + 1;
        for (p = freeList; p < hl + HCHUNK - 1; p++)
            p->next = p + 1;
        p->next = NULL;
    }
    return hl;
}

static void
freeHashList(HashList *hl)
{
    hl->next = freeList;
    freeList = hl;
}
void
insertHashTable(HashTable *table, StgWord key, void *data)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;

    /* We want no duplicates */
    ASSERT(lookupHashTable(table, key) == NULL);

    /* When the average load gets too high, we expand the table */
    if (++table->kcount >= HLOAD * table->bcount)
        expand(table);

    bucket = hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    hl = allocHashList();

    hl->key = key;
    hl->data = data;
    hl->next = table->dir[segment][index];
    table->dir[segment][index] = hl;
}
void *
removeHashTable(HashTable *table, StgWord key, void *data)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;
    HashList *prev = NULL;

    bucket = hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
        if (hl->key == key && (data == NULL || hl->data == data)) {
            if (prev == NULL)
                table->dir[segment][index] = hl->next;
            else
                prev->next = hl->next;
            table->kcount--;
            return hl->data;
        }
        prev = hl;
    }

    /* It's not there */
    ASSERT(data == NULL);
    return NULL;
}
/* -----------------------------------------------------------------------------
* When we free a hash table, we are also good enough to free the
* data part of each (key, data) pair, as long as our caller can tell
* us how to do it.
* -------------------------------------------------------------------------- */
void
freeHashTable(HashTable *table, void (*freeDataFun)(void *))
{
    long segment;
    long index;
    HashList *hl;
    HashList *next;

    /* The last bucket with something in it is table->max + table->split - 1 */
    segment = (table->max + table->split - 1) / HSEGSIZE;
    index = (table->max + table->split - 1) % HSEGSIZE;

    while (segment >= 0) {
        while (index >= 0) {
            for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
                next = hl->next;
                if (freeDataFun != NULL)
                    (*freeDataFun)(hl->data);
                freeHashList(hl);
            }
            index--;
        }
        free(table->dir[segment]);
        segment--;
        index = HSEGSIZE - 1;
    }

    free(table);
}
/* -----------------------------------------------------------------------------
* When we initialize a hash table, we set up the first segment as well,
* initializing all of the first segment's hash buckets to NULL.
* -------------------------------------------------------------------------- */
HashTable *
allocHashTable(void)
{
    HashTable *table;
    HashList **hb;

    table = stgMallocBytes(sizeof(HashTable), "allocHashTable");

    allocSegment(table, 0);

    for (hb = table->dir[0]; hb < table->dir[0] + HSEGSIZE; hb++)
        *hb = NULL;

    table->split = 0;
    table->max = HSEGSIZE;
    table->mask1 = HSEGSIZE - 1;
    table->mask2 = 2 * HSEGSIZE - 1;
    table->kcount = 0;
    table->bcount = HSEGSIZE;

    return table;
}
#endif
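For reference, the bucket-selection rule that 0Hash.c's header comment attributes to Larson (linear hashing with a split pointer and a pair of masks) can be sketched in standalone C. This is an illustrative sketch only, not part of the commit: the names LinearHash, bucket_of and expand_one are invented here, plain C types stand in for the RTS's StgWord, and the key /= sizeof(StgWord) pre-scaling step of hash() is omitted.

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch of Larson-style linear hashing, mirroring the
 * split/mask1/mask2 bookkeeping of the deleted 0Hash.c (names invented). */
typedef struct {
    int split;   /* next bucket of the smaller table to be split */
    int max;     /* size of the smaller table (a power of two)   */
    int mask1;   /* max - 1                                      */
    int mask2;   /* 2 * max - 1                                  */
} LinearHash;

/* Hash with the smaller table; if that bucket has already been split,
 * re-hash with the larger table (the same rule as hash() above). */
static int bucket_of(const LinearHash *h, uintptr_t key)
{
    int bucket = (int)(key & (uintptr_t)h->mask1);
    if (bucket < h->split)
        bucket = (int)(key & (uintptr_t)h->mask2);
    return bucket;
}

/* Advance the split pointer by one bucket; once every bucket of the
 * smaller table has been split, double the table and reset (as in expand()). */
static void expand_one(LinearHash *h)
{
    if (++h->split == h->max) {
        h->split = 0;
        h->max *= 2;
        h->mask1 = h->mask2;
        h->mask2 = (h->mask2 << 1) | 1;
    }
}

int main(void)
{
    LinearHash h = { 0, 8, 7, 15 };   /* 8 initial buckets */

    /* Before its home bucket is split, key 12 hashes with mask1 only. */
    printf("before: key 12 -> bucket %d\n", bucket_of(&h, 12));   /* bucket 4 */

    /* Split buckets 0..4 of the smaller table. */
    for (int i = 0; i < 5; i++)
        expand_one(&h);

    /* Afterwards the same key re-hashes with mask2 into the larger table. */
    printf("after:  key 12 -> bucket %d\n", bucket_of(&h, 12));   /* bucket 12 */
    return 0;
}

The two printed lines show the point of the scheme: the table grows one bucket at a time, and only keys whose mask1 bucket has already been split are redistributed with the larger mask.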
rts/parallel/0Parallel.h  deleted  100644 → 0
/*
Time-stamp: <Mon Oct 04 1999 14:50:28 Stardate: [-30]3692.88 hwloidl>
Definitions for parallel machines.
This section contains definitions applicable only to programs compiled
to run on a parallel machine, i.e. on GUM. Some of these definitions
are also used when simulating parallel execution, i.e. on GranSim.
*/
/*
ToDo: Check the PAR specific part of this file
Move stuff into Closures.h and ClosureMacros.h
Clean-up GRAN specific code
-- HWL
*/
#ifndef PARALLEL_H
#define PARALLEL_H
#if defined(PAR) || defined(GRAN)
/* whole file */
#include "Rts.h"
#include "GranSim.h"
//#include "ClosureTypes.h"
//@menu
//* Basic definitions::
//* Externs and types::
//* Dummy defs::
//* Par specific fixed headers::
//* Parallel only heap objects::
//* Packing definitions::
//* End of File::
//@end menu
//*/
//@node Basic definitions, Externs and types
//@section Basic definitions
/* SET_PAR_HDR and SET_STATIC_PAR_HDR now live in ClosureMacros.h */
/* Needed for dumping routines */
#if defined(PAR)
# define TIME StgWord64
# define CURRENT_TIME msTime()
# define TIME_ON_PROC(p) msTime()
# define CURRENT_PROC thisPE
# define BINARY_STATS RtsFlags.ParFlags.granSimStats_Binary
#elif defined(GRAN)
# define TIME rtsTime
# define CURRENT_TIME CurrentTime[CurrentProc]
# define TIME_ON_PROC(p) CurrentTime[p]
# define CURRENT_PROC CurrentProc
# define BINARY_STATS RtsFlags.GranFlags.granSimStats_Binary
#endif
#if defined(PAR)
# define MAX_PES 256
/* Maximum number of processors */
/* MAX_PES is enforced by SysMan, which does not
allow more than this many "processors".
This is important because PackGA [GlobAddr.lc]
**assumes** that a PE# can fit in 8+ bits.
*/
#endif
//@node Externs and types, Dummy defs, Basic definitions
//@section Externs and types
#if defined(PAR)
/* GUM: one spark queue on each PE, and each PE sees only its own spark queue */
extern rtsSparkQ pending_sparks_hd;
extern rtsSparkQ pending_sparks_tl;
#elif defined(GRAN)
/* GranSim: a globally visible array of spark queues */
extern rtsSparkQ pending_sparks_hds[];
extern rtsSparkQ pending_sparks_tls[];
#endif

extern unsigned int /* nat */ spark_queue_len(PEs proc);

extern StgInt SparksAvail;     /* How many sparks are available */
/* prototypes of spark routines */
/* ToDo: check whether all have to be visible -- HWL */
#if defined(GRAN)
rtsSpark *newSpark(StgClosure *node, StgInt name, StgInt gran_info,
                   StgInt size_info, StgInt par_info, StgInt local);
void disposeSpark(rtsSpark *spark);
void disposeSparkQ(rtsSparkQ spark);
void add_to_spark_queue(rtsSpark *spark);
void delete_from_spark_queue(rtsSpark *spark);
#endif
#define STATS_FILENAME_MAXLEN 128
/* Where to write the log file */
//extern FILE *gr_file;
extern char gr_filename[STATS_FILENAME_MAXLEN];
#if defined(GRAN)
int init_gr_simulation(char *rts_argv[], int rts_argc,
                       char *prog_argv[], int prog_argc);
void end_gr_simulation(void);
#endif
#if defined(PAR)
extern I_ do_sp_profile;

extern P_ PendingFetches;
extern GLOBAL_TASK_ID *PEs;
extern rtsBool IAmMainThread, GlobalStopPending;
extern rtsBool fishing;
extern GLOBAL_TASK_ID SysManTask;
extern int seed;                /* pseudo-random-number generator seed: */
                                /* Initialised in ParInit */
extern I_ threadId;             /* Number of Threads that have existed on a PE */
extern GLOBAL_TASK_ID mytid;

extern int nPEs;

extern rtsBool InGlobalGC;      /* Are we in the midst of performing global GC */

extern HashTable *pGAtoGALAtable;
extern HashTable *LAtoGALAtable;
extern GALA *freeIndirections;
extern GALA *liveIndirections;
extern GALA *freeGALAList;
extern GALA *liveRemoteGAs;
extern int thisPE;

void RunParallelSystem(StgPtr program_closure);
void initParallelSystem();
void SynchroniseSystem();
void registerTask(GLOBAL_TASK_ID gtid);
globalAddr *LAGAlookup(P_ addr);
P_ GALAlookup(globalAddr *ga);
globalAddr *MakeGlobal(P_ addr, rtsBool preferred);
globalAddr *setRemoteGA(P_ addr, globalAddr *ga, rtsBool preferred);
void splitWeight(globalAddr *to, globalAddr *from);
globalAddr *addWeight(globalAddr *ga);
void initGAtables();
W_ taskIDtoPE(GLOBAL_TASK_ID gtid);
void RebuildLAGAtable();

void *lookupHashTable(HashTable *table, StgWord key);
void insertHashTable(HashTable *table, StgWord key, void *data);
void freeHashTable(HashTable *table, void (*freeDataFun)((void *data)));
HashTable *allocHashTable();
void *removeHashTable(HashTable *table, StgWord key, void *data);

#endif /* PAR */
/* Interface for dumping routines (i.e. writing to log file) */
void DumpGranEvent(GranEventType name, StgTSO *tso);
void DumpRawGranEvent(PEs proc, PEs p, GranEventType name, StgTSO *tso,
                      StgClosure *node, StgInt sparkname, StgInt len);
//void DumpEndEvent(PEs proc, StgTSO *tso, rtsBool mandatory_thread);
//@node Dummy defs, Par specific fixed headers, Externs and types
//@section Dummy defs
/*
Get this out of the way. These are all null definitions.
*/
//# define GA_HDR_SIZE 0
//# define GA(closure) /*nothing */
//# define SET_GA(closure,ga) /* nothing */
//# define SET_STATIC_GA(closure) /* nothing */
//# define SET_GRAN_HDR(closure,pe) /* nothing */
//# define SET_STATIC_PROCS(closure) /* nothing */
//# define SET_TASK_ACTIVITY(act) /* nothing */
#if defined(GRAN)
# define GA_HDR_SIZE 1
# define PROCS_HDR_POSN PAR_HDR_POSN
# define PROCS_HDR_SIZE 1
/* Accessing components of the field */
# define PROCS(closure) ((closure)->header.gran.procs)
/* SET_PROCS is now SET_GRAN_HEADER in ClosureMacros.h. */
#endif
//@node Par specific fixed headers, Parallel only heap objects, Dummy defs
//@section Par specific fixed headers
/*
Definitions relating to the entire parallel-only fixed-header field.
On GUM, the global addresses for each local closure are stored in a separate
hash table, rather then with the closure in the heap. We call @getGA@ to
look up the global address associated with a local closure (0 is returned
for local closures that have no global address), and @setGA@ to store a new
global address for a local closure which did not previously have one.
*/
#if defined(PAR)
# define GA_HDR_SIZE 0
# define GA(closure) getGA(closure)
# define SET_GA(closure, ga) setGA(closure,ga)
# define SET_STATIC_GA(closure)
# define SET_GRAN_HDR(closure,pe)
# define SET_STATIC_PROCS(closure)
# define MAX_GA_WEIGHT 0
/* Treat as 2^n */
W_ PackGA ((W_, int));
/* There was a PACK_GA macro here; but we turned it into the PackGA
routine [GlobAddr.lc] (because it needs to do quite a bit of
paranoia checking. Phil & Will (95/08)
*/
/* At the moment, there is no activity profiling for GUM. This may change. */
# define SET_TASK_ACTIVITY(act)
/* nothing */
#endif
//@node Parallel only heap objects, Packing definitions, Par specific fixed headers
//@section Parallel only heap objects
// NB: The following definitons are BOTH for GUM and GrAnSim -- HWL
/* All in Closures.h and CLosureMacros.h */
//@node Packing definitions, End of File, Parallel only heap objects
//@section Packing definitions
//@menu
//* GUM::
//* GranSim::
//@end menu
//*/
//@node GUM, GranSim, Packing definitions, Packing definitions
//@subsection GUM
#if defined(PAR)
/*
Symbolic constants for the packing code.
This constant defines how many words of data we can pack into a single
packet in the parallel (GUM) system.
*/
//@menu
//* Externs::
//* Prototypes::
//* Macros::
//@end menu
//*/
//@node Externs, Prototypes, GUM, GUM
//@subsubsection Externs
extern W_ *PackBuffer;          /* size: can be set via option */
extern long *buffer;            /* HWL_ */
extern W_ *freeBuffer;          /* HWL_ */
extern W_ *packBuffer;          /* HWL_ */

extern void InitPackBuffer(STG_NO_ARGS);
extern void InitMoreBuffers(STG_NO_ARGS);
extern void InitPendingGABuffer(W_ size);
extern void AllocClosureQueue(W_ size);
//@node Prototypes, Macros, Externs, GUM
//@subsubsection Prototypes
void InitPackBuffer();
P_ PackTSO(P_ tso, W_ *size);
P_ PackStkO(P_ stko, W_ *size);
P_ AllocateHeap(W_ size);       /* Doesn't belong */

void InitClosureQueue();
P_ DeQueueClosure();
void QueueClosure(P_ closure);
rtsBool QueueEmpty();
void PrintPacket(P_ buffer);

P_ get_closure_info(P_ closure, W_ *size, W_ *ptrs, W_ *nonptrs, W_ *vhs,
                    char *type);

rtsBool isOffset(globalAddr *ga),
        isFixed(globalAddr *ga);

void doGlobalGC();

P_ PackNearbyGraph(P_ closure, W_ *size);
P_ UnpackGraph(W_ *buffer, globalAddr **gamap, W_ *nGAs);
//@node Macros, , Prototypes, GUM
//@subsubsection Macros
# define PACK_HEAP_REQUIRED \
((RtsFlags.ParFlags.packBufferSize - PACK_HDR_SIZE) / (PACK_GA_SIZE + _FHS) * (SPEC_HS + 2))
# define MAX_GAS (RtsFlags.ParFlags.packBufferSize / PACK_GA_SIZE)
# define PACK_GA_SIZE 3
/* Size of a packed GA in words */
/* Size of a packed fetch-me in words */
# define PACK_FETCHME_SIZE (PACK_GA_SIZE + FIXED_HS)
# define PACK_HDR_SIZE 1
/* Words of header in a packet */
# define PACK_PLC_SIZE 2
/* Size of a packed PLC in words */
#endif
/* PAR */
//@node GranSim, , GUM, Packing definitions
//@subsection GranSim
#if defined(GRAN)
/* ToDo: Check which of the PAR routines are needed in GranSim -- HWL */
//@menu
//* Types::
//* Prototypes::