Commit 2be44cb2 authored by simonmar's avatar simonmar
Browse files

[project @ 2002-10-21 11:38:53 by simonmar]

Bite the bullet and generalise the central memory allocation scheme.
Previously we tried to allocate memory starting from a fixed address,
which was set for each architecture (0x50000000 was a common one), and
to decide whether a particular address was in the heap or not we would
do a simple comparison against this address.

This doesn't work too well, because:

 - if we dynamically-load some objects above the boundary, the
   heap-allocated test becomes invalid

 - on windows we have less control, and the heap might be
   split into multiple sections

 - it turns out that on some Linux kernels we don't get memory where
   we asked for it.  This might be a bug in those kernels, but it
   exposes the fragility of our allocation scheme.

The solution is to bite the bullet and maintain a table mapping
addresses to a value indicating whether that address is in the heap or
not.  Since we normally allocate heap in chunks of 1Mb, the table is
quite small: 4k on a 32-bit machine, using one byte for each 1Mb
block.  Testing an address for heap residency now involves a memory
access, but the table is normally cache-resident.  I didn't manage to
measure any slowdown after making the change.

On a 64-bit machine, we'll need to use a 2-level table; I haven't
implemented that yet.

Now we can generalise the procedure used to grab memory from the OS.
In the general case, we allocate one megablock more than we need to,
and trim off the slop around the allocation to leave an aligned chunk.
The next time around, however, we try to allocate memory right after
the last chunk allocated, on the grounds that it is aligned and
probably free: if this doesn't work, we have to back off to the
general mechanism (it seems to work most of the time).

This cleans up the Windows story too: is_heap_alloced() has gone, and
we should be able to handle more than 256M of memory (or whatever the
arbitrary limit was before).

MERGE TO STABLE (after lots of testing)
parent 241e4b61
/* -----------------------------------------------------------------------------
* $Id: MBlock.c,v 1.29 2002/07/17 09:21:50 simonmar Exp $
* $Id: MBlock.c,v 1.30 2002/10/21 11:38:53 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
......@@ -18,24 +18,23 @@
#include "MBlock.h"
#include "BlockAlloc.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifndef mingw32_TARGET_OS
# ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
# endif
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#if HAVE_WINDOWS_H
#include <windows.h>
#endif
......@@ -44,56 +43,162 @@
lnat mblocks_allocated = 0;
/* -----------------------------------------------------------------------------
The MBlock Map: provides our implementation of HEAP_ALLOCED()
-------------------------------------------------------------------------- */
StgWord8 mblock_map[4096]; // initially all zeros
static void
mblockIsHeap (void *p)
{
mblock_map[((StgWord)p & ~MBLOCK_MASK) >> MBLOCK_SHIFT] = 1;
}
/* -----------------------------------------------------------------------------
Allocate new mblock(s)
-------------------------------------------------------------------------- */
// Convenience entry point: allocate a single megablock.
void *
getMBlock(void)
{
    return getMBlocks(1);
}
/* -----------------------------------------------------------------------------
The mmap() method
On Unix-like systems, we use mmap() to allocate our memory. We
want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
boundary. The mmap() interface doesn't give us this level of
control, so we have to use some heuristics.
In the general case, if we want a block of n megablocks, then we
allocate n+1 and trim off the slop from either side (using
munmap()) to get an aligned chunk of size n. However, the next
time we'll try to allocate directly after the previously allocated
chunk, on the grounds that this is aligned and likely to be free.
If it turns out that we were wrong, we have to munmap() and try
again using the general method.
-------------------------------------------------------------------------- */
#if !defined(mingw32_TARGET_OS) && !defined(cygwin32_TARGET_OS)
// A wrapper around mmap(), to abstract away from OS differences in
// the mmap() interface.
//
// 'addr' is a placement request for the mapping; pass NULL/0 to let
// the OS choose.  'size' is in bytes.  Returns the mapped address,
// or (void *)-1 on failure, mirroring mmap()'s own convention.
//
// Fix: the solaris2 and darwin branches previously passed MAP_FIXED
// unconditionally, so a my_mmap(0, size) call (as made by
// gen_map_mblocks()) would demand a mapping at address zero.  We now
// only request fixed placement when the caller supplied an address.
static void *
my_mmap (void *addr, int size)
{
    void *ret;

#ifdef solaris2_TARGET_OS
    {
	// Solaris has no MAP_ANON; map /dev/zero for zeroed memory.
	int fd = open("/dev/zero",O_RDONLY);
	if (fd == -1) {
	    return (void *)-1;	// report failure the way mmap() does
	}
	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
		   (addr == NULL ? 0 : MAP_FIXED) | MAP_PRIVATE, fd, 0);
	close(fd);
    }
#elif hpux_TARGET_OS
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
	       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif darwin_TARGET_OS
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
	       (addr == NULL ? 0 : MAP_FIXED) | MAP_ANON | MAP_PRIVATE, -1, 0);
#else
    // Generic Unix: anonymous private mapping; 'addr' is only a hint.
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
	       MAP_ANON | MAP_PRIVATE, -1, 0);
#endif

    return ret;
}
// Implements the general case: allocate a chunk of memory of 'size'
// bytes (a multiple of MBLOCK_SIZE), aligned to an MBLOCK_SIZE
// boundary.
//
// Strategy: over-allocate by one megablock, then munmap() the
// unaligned slop at both ends, leaving an aligned chunk of the
// requested size.  Dies via barf() if the OS refuses.
static void *
gen_map_mblocks (int size)
{
    int slop;
    char *ret;	// char*, not void*: ISO C forbids void* arithmetic

    // Try to map a larger block, and take the aligned portion from
    // it (unmap the rest).
    size += MBLOCK_SIZE;
    ret = my_mmap(0, size);
    if (ret == (char *)-1) {
	barf("gen_map_mblocks: mmap failed");
    }

    // unmap the slop bits around the chunk we allocated
    slop = (W_)ret & MBLOCK_MASK;
    // leading slop: everything below the first aligned boundary
    // (a whole megablock when slop == 0, since we over-allocated one).
    if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
	barf("gen_map_mblocks: munmap failed");
    }
    // trailing slop, present only when the mapping was misaligned.
    if (slop > 0 && munmap(ret + size - slop, slop) == -1) {
	barf("gen_map_mblocks: munmap failed");
    }

    // next time, try after the block we just got.
    ret += MBLOCK_SIZE - slop;
    return ret;
}
// The external interface: allocate 'n' mblocks, and return the
// address.
//
// NOTE(review): this capture is a unified diff with the +/- markers
// stripped, so the removed (pre-commit) and added (post-commit) lines
// of getMBlocks() appear interleaved below.  The text is therefore
// NOT one compilable function; comments mark which version each
// fragment belongs to.
void *
getMBlocks(nat n)
{
static caddr_t next_request = (caddr_t)HEAP_BASE;
caddr_t ret;
lnat size = MBLOCK_SIZE * n;
nat i;
// --- OLD (removed by this commit): direct per-OS mmap() calls,
// now subsumed by my_mmap() above ---
#ifdef solaris2_TARGET_OS
{
int fd = open("/dev/zero",O_RDONLY);
ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE, fd, 0);
close(fd);
}
#elif hpux_TARGET_OS
ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif darwin_TARGET_OS
ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
#else
ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
#endif
// --- NEW (added): first call (next_request == 0, i.e. HEAP_BASE is 0)
// uses the general slop-trimming allocator; afterwards we try to map
// directly after the previous allocation ---
if (next_request == 0) {
// use gen_map_mblocks the first time.
ret = gen_map_mblocks(size);
} else {
ret = my_mmap(next_request, size);
if (ret == (void *)-1) {
if (errno == ENOMEM) {
barf("getMBlock: out of memory (blocks requested: %d)", n);
} else {
barf("GetMBlock: mmap failed");
}
}
// --- OLD error handling (removed): belch + stg_exit on ENOMEM ---
if (ret == (void *)-1) {
if (errno == ENOMEM) {
belch("out of memory (requested %d bytes)", n * BLOCK_SIZE);
stg_exit(EXIT_FAILURE);
} else {
barf("getMBlock: mmap failed");
}
}
// --- OLD alignment check (removed): misalignment was fatal ---
if (((W_)ret & MBLOCK_MASK) != 0) {
barf("GetMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
// --- NEW alignment handling (added): unmap and retry via
// gen_map_mblocks() instead of aborting ---
if (((W_)ret & MBLOCK_MASK) != 0) {
// misaligned block!
#ifdef DEBUG
belch("getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
#endif
// unmap this block...
if (munmap(ret, size) == -1) {
barf("getMBlock: munmap failed");
}
// and do it the hard way
ret = gen_map_mblocks(size);
}
}
// Next time, we'll try to allocate right after the block we just got.
// (NEW form of the next_request update.)
next_request = ret + size;
IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
// OLD form of the next_request update (removed).
next_request += size;
// fill in the table
for (i = 0; i < n; i++) {
mblockIsHeap( ret + i * MBLOCK_SIZE );
}
mblocks_allocated += n;
return ret;
}
......@@ -123,15 +228,6 @@ char* end_non_committed = (char*)0;
/* Number of bytes reserved */
static unsigned long size_reserved_pool = SIZE_RESERVED_POOL;
/* Test whether 'x' lies inside the single reserved heap region
 * [base_non_committed, end_non_committed].
 * This predicate should be inlined, really.
 * TODO: this only works for a single chunk. */
int
is_heap_alloced(const void* x)
{
    char *addr = (char *)x;

    if (addr < base_non_committed) {
        return 0;
    }
    return addr <= end_non_committed;
}
void *
getMBlocks(nat n)
{
......@@ -198,6 +294,11 @@ getMBlocks(nat n)
mblocks_allocated += n;
// fill in the table
for (i = 0; i < n; i++) {
mblockIsHeap( ret + i * MBLOCK_SIZE );
}
return ret;
}
......
/* -----------------------------------------------------------------------------
* $Id: MBlock.h,v 1.14 2002/05/14 08:15:49 matthewc Exp $
* $Id: MBlock.h,v 1.15 2002/10/21 11:38:54 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
* MegaBlock Allocator interface.
*
* ---------------------------------------------------------------------------*/
#ifndef __MBLOCK_H__
#define __MBLOCK_H__
extern lnat mblocks_allocated;
#if defined(mingw32_TARGET_OS)
extern int is_heap_alloced(const void* p);
#endif
extern void * getMBlock(void);
extern void * getMBlocks(nat n);
#if freebsd2_TARGET_OS || freebsd_TARGET_OS
/* Executable is loaded from 0x0
* Shared libraries are loaded at 0x2000000
* Stack is at the top of the address space. The kernel probably owns
* 0x8000000 onwards, so we'll pick 0x5000000.
*/
#define HEAP_BASE 0x50000000
#elif netbsd_TARGET_OS
/* NetBSD i386 shared libs are at 0x40000000
*/
#define HEAP_BASE 0x50000000
#elif openbsd_TARGET_OS
#define HEAP_BASE 0x50000000
#elif linux_TARGET_OS
#if ia64_TARGET_ARCH
/* Shared libraries are in region 1, text in region 2, data in region 3.
* Stack is at the top of region 4. We use the bottom.
*/
#define HEAP_BASE (4L<<61)
#else
/* Any ideas?
*/
#define HEAP_BASE 0x50000000
#endif
#elif solaris2_TARGET_OS
/* guess */
#define HEAP_BASE 0x50000000
#elif osf3_TARGET_OS
#if osf3_TARGET_OS
/* ToDo: Perhaps by adjusting this value we can make linking without
* -static work (i.e., not generate a core-dumping executable)? */
#if SIZEOF_VOID_P == 8
......@@ -57,20 +23,52 @@ extern void * getMBlocks(nat n);
#error I have no idea where to begin the heap on a non-64-bit osf3 machine.
#endif
#elif hpux_TARGET_OS
/* guess */
#define HEAP_BASE 0x50000000
#else
#elif darwin_TARGET_OS
/* guess */
#define HEAP_BASE 0x50000000
// we're using the generic method
#define HEAP_BASE 0
#elif defined(mingw32_TARGET_OS) || defined(cygwin32_TARGET_OS)
/* doesn't matter, we use a reserve/commit algorithm */
#endif
/* -----------------------------------------------------------------------------
The HEAP_ALLOCED() test.
HEAP_ALLOCED is called FOR EVERY SINGLE CLOSURE during GC.
It needs to be FAST.
Implementation of HEAP_ALLOCED
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since heap is allocated in chunks of megablocks (MBLOCK_SIZE), we
can just use a table to record which megablocks in the address
space belong to the heap. On a 32-bit machine, with 1Mb
megablocks, using 8 bits for each entry in the table, the table
requires 4k. Lookups during GC will be fast, because the table
will be quickly cached (indeed, performance measurements showed no
measurable difference between doing the table lookup and using a
constant comparison).
-------------------------------------------------------------------------- */
// 32-bit: HEAP_ALLOCED is a single byte-table lookup.
#if SIZEOF_VOID_P == 4
// This is the table. Each byte is non-zero if the appropriate MBlock
// in the address space contains heap.
extern StgWord8 mblock_map[];
// Index the table by the megablock number of 'p'.
#define HEAP_ALLOCED(p) \
((int)(mblock_map[((StgWord)(p) & ~MBLOCK_MASK) >> MBLOCK_SHIFT]))
#else // SIZEOF_VOID_P != 4
// on a 64-bit machine, we need to extend the above scheme to use a
// 2-level mapping. (ToDo)
// Until then, 64-bit platforms fall back to the old base-address
// comparison (only valid when all code is loaded below HEAP_BASE).
#ifdef TEXT_BEFORE_HEAP
# define HEAP_ALLOCED(x) ((StgPtr)(x) >= (StgPtr)(HEAP_BASE))
#else
// NOTE(review): the two #error lines below are the old (removed) and
// new (added) diff lines shown together without +/- markers.
#error Dont know where to get memory from on this architecture
/* ToDo: memory locations on other architectures */
#error HEAP_ALLOCED not defined
#endif
#endif // SIZEOF_VOID_P != 4
#endif // __MBLOCK_H__
/* -----------------------------------------------------------------------------
* $Id: Storage.h,v 1.44 2002/03/26 23:56:44 sof Exp $
* $Id: Storage.h,v 1.45 2002/10/21 11:38:54 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
......@@ -438,20 +438,6 @@ extern unsigned long macho_edata;
&& is_not_dynamically_loaded_ptr((char *)p) )
#endif
/* NOTE(review): this is the PRE-commit definition of HEAP_ALLOCED in
 * Storage.h, removed by this commit (the replacement lives in
 * MBlock.h and uses the mblock_map table). */
/* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
* during GC. It needs to be FAST.
*
* BEWARE: when we're dynamically loading code (for GHCi), make sure
* that we don't load any code above HEAP_BASE, or this test won't work.
*/
#ifdef TEXT_BEFORE_HEAP
# define HEAP_ALLOCED(x) ((StgPtr)(x) >= (StgPtr)(HEAP_BASE))
#else
/* mingw, really */
# define HEAP_ALLOCED(x) (is_heap_alloced(x))
#endif
/* --------------------------------------------------------------------------
Macros for distinguishing data pointers from code pointers
--------------------------------------------------------------------------
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment