Commit fdd7a41e authored by Simon Marlow

Fix some bugs in the stack-reducing code (#2571)

parent 561ca008
@@ -2192,7 +2192,7 @@ static StgTSO *
 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
 {
     bdescr *bd, *new_bd;
-    lnat new_tso_size_w, tso_size_w;
+    lnat free_w, tso_size_w;
     StgTSO *new_tso;
 
     tso_size_w = tso_sizeW(tso);
@@ -2207,19 +2207,19 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
     // while we are moving the TSO:
     lockClosure((StgClosure *)tso);
 
-    new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
-    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
-               (long)tso->id, tso_size_w, new_tso_size_w);
+    // this is the number of words we'll free
+    free_w = round_to_mblocks(tso_size_w/2);
 
     bd = Bdescr((StgPtr)tso);
-    new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
-    new_bd->free = bd->free;
+    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
     bd->free = bd->start + TSO_STRUCT_SIZEW;
 
     new_tso = (StgTSO *)new_bd->start;
     memcpy(new_tso,tso,TSO_STRUCT_SIZE);
-    new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+    new_tso->stack_size = new_bd->free - new_tso->stack;
+
+    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
 
     tso->what_next = ThreadRelocated;
     tso->_link = new_tso; // no write barrier reqd: same generation
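The arithmetic in this function mixes three units: words, blocks, and megablocks. free_w is a word count rounded down to whole megablocks by round_to_mblocks, and splitLargeBlock takes a block count, hence the division by BLOCK_SIZE_W. The standalone sketch below only makes those unit conversions concrete; the sizes and the simplified round_to_mblocks are assumptions for illustration, not GHC's definitions.

/* Standalone sketch of the word/block/megablock conversions used in
 * threadStackUnderflow.  Every constant and the simplified
 * round_to_mblocks below are assumptions, not GHC's definitions. */
#include <stddef.h>
#include <stdio.h>

#define WORD_SIZE      8                          /* assumed 64-bit words        */
#define BLOCK_SIZE     4096                       /* assumed block size in bytes */
#define MBLOCK_SIZE    (1024 * 1024)              /* assumed megablock size      */
#define BLOCK_SIZE_W   (BLOCK_SIZE / WORD_SIZE)   /* words per block             */
#define MBLOCK_SIZE_W  (MBLOCK_SIZE / WORD_SIZE)  /* words per megablock         */

/* Simplified stand-in: round a word count down to whole megablocks. */
static size_t round_to_mblocks(size_t words)
{
    return (words / MBLOCK_SIZE_W) * MBLOCK_SIZE_W;
}

int main(void)
{
    size_t tso_size_w = 5 * MBLOCK_SIZE_W;         /* a 5-megablock TSO */

    /* "the number of words we'll free": half the TSO, in whole megablocks */
    size_t free_w      = round_to_mblocks(tso_size_w / 2);
    size_t free_blocks = free_w / BLOCK_SIZE_W;    /* what splitLargeBlock is given */

    printf("TSO size    : %zu words\n", tso_size_w);
    printf("words freed : %zu words = %zu blocks = %zu mblocks\n",
           free_w, free_blocks, free_w / MBLOCK_SIZE_W);
    printf("words kept  : %zu\n", tso_size_w - free_w);
    return 0;
}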
@@ -565,6 +565,9 @@ freeChain_lock(bdescr *bd)
     RELEASE_SM_LOCK;
 }
 
+// splitBlockGroup(bd,B) splits bd in two.  Afterward, bd will have B
+// blocks, and a new block descriptor pointing to the remainder is
+// returned.
 bdescr *
 splitBlockGroup (bdescr *bd, nat blocks)
 {
@@ -575,16 +578,21 @@ splitBlockGroup (bdescr *bd, nat blocks)
     }
 
     if (bd->blocks > BLOCKS_PER_MBLOCK) {
-        nat mblocks;
+        nat low_mblocks, high_mblocks;
         void *new_mblock;
         if ((blocks - BLOCKS_PER_MBLOCK) % (MBLOCK_SIZE / BLOCK_SIZE) != 0) {
             barf("splitLargeBlock: not a multiple of a megablock");
         }
-        mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
-        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + mblocks * MBLOCK_SIZE_W);
+        low_mblocks  = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
+        high_mblocks = (bd->blocks - blocks) / (MBLOCK_SIZE / BLOCK_SIZE);
+
+        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + low_mblocks * MBLOCK_SIZE_W);
         initMBlock(new_mblock);
         new_bd = FIRST_BDESCR(new_mblock);
-        new_bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
+        new_bd->blocks = MBLOCK_GROUP_BLOCKS(high_mblocks);
+
+        ASSERT(blocks + new_bd->blocks ==
+               bd->blocks + BLOCKS_PER_MBLOCK - MBLOCK_SIZE/BLOCK_SIZE);
     }
     else
     {
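The ASSERT added here pins down the block accounting across a split: only the first megablock of a group loses space to block descriptors (BLOCKS_PER_MBLOCK is smaller than MBLOCK_SIZE / BLOCK_SIZE), and splitting a multi-megablock group creates one more such first megablock, so the usable blocks drop by exactly that difference. The standalone example below reruns the arithmetic with assumed sizes (4KB blocks, 1MB megablocks, 252 usable blocks per megablock) to illustrate the invariant; it is not GHC's code.

/* Worked example of the block accounting behind the new ASSERT in
 * splitBlockGroup.  The sizes below are assumptions for illustration,
 * not GHC's actual configuration. */
#include <assert.h>
#include <stdio.h>

#define BLOCK_SIZE        4096
#define MBLOCK_SIZE       (1024 * 1024)
#define MBLOCK_BLOCKS     (MBLOCK_SIZE / BLOCK_SIZE)   /* 256 blocks per mblock */
#define BLOCKS_PER_MBLOCK 252   /* assumed: 4 blocks of each mblock hold descriptors */

/* Usable blocks in a group spanning n megablocks: only the first
 * megablock pays the descriptor overhead. */
static unsigned mblock_group_blocks(unsigned n)
{
    return BLOCKS_PER_MBLOCK + (n - 1) * MBLOCK_BLOCKS;
}

int main(void)
{
    unsigned n_mblocks = 3;                                     /* original group size  */
    unsigned bd_blocks = mblock_group_blocks(n_mblocks);        /* bd->blocks, here 764 */
    unsigned blocks    = BLOCKS_PER_MBLOCK + 1 * MBLOCK_BLOCKS; /* keep 508 in bd       */

    /* mirrors the barf() check: the split point must land on a megablock boundary */
    assert((blocks - BLOCKS_PER_MBLOCK) % MBLOCK_BLOCKS == 0);

    unsigned low_mblocks   = 1 + (blocks - BLOCKS_PER_MBLOCK) / MBLOCK_BLOCKS;  /* 2   */
    unsigned high_mblocks  = (bd_blocks - blocks) / MBLOCK_BLOCKS;              /* 1   */
    unsigned new_bd_blocks = mblock_group_blocks(high_mblocks);                 /* 252 */

    /* the invariant asserted in the commit */
    assert(blocks + new_bd_blocks == bd_blocks + BLOCKS_PER_MBLOCK - MBLOCK_BLOCKS);

    printf("%u-mblock group split into %u + %u mblocks\n",
           n_mblocks, low_mblocks, high_mblocks);
    printf("blocks: %u kept in bd, %u in new_bd (one more descriptor area paid for)\n",
           blocks, new_bd_blocks);
    return 0;
}

With those assumed numbers, a 3-megablock group of 764 blocks split at 508 blocks leaves 252 blocks in the new group, and 508 + 252 equals 764 + 252 - 256, exactly as the assertion requires.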
@@ -661,8 +661,8 @@ allocatedBytes( void )
     return allocated;
 }
 
-// split N blocks off the start of the given bdescr, returning the
-// remainder as a new block group.  We treat the remainder as if it
+// split N blocks off the front of the given bdescr, returning the
+// new block group.  We treat the remainder as if it
 // had been freshly allocated in generation 0.
 bdescr *
 splitLargeBlock (bdescr *bd, nat blocks)
@@ -680,6 +680,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     new_bd->step = g0s0;
     new_bd->flags = BF_LARGE;
     new_bd->free = bd->free;
+    ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
 
     // add the new number of blocks to the counter.  Due to the gaps
     // for block descriptor, new_bd->blocks + bd->blocks might not be
@@ -687,7 +688,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     bd->step->n_large_blocks += bd->blocks;
 
     return new_bd;
-}
+}
 
 /* -----------------------------------------------------------------------------
    allocateLocal()
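The ASSERT added to splitLargeBlock above says that the free pointer copied from bd must still fall inside new_bd's blocks after the split. The toy program below illustrates that invariant with a made-up descriptor struct and a flat layout that ignores block-descriptor gaps; it is a sketch under those assumptions, not GHC's bdescr representation.

/* Sketch of the free-pointer invariant asserted in splitLargeBlock:
 * after a split, new_bd->free (copied from bd->free) must still lie
 * within new_bd's blocks.  The descriptor below is a toy struct and the
 * layout is flat (no descriptor gaps); both are assumptions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE_W 512                /* assumed words per block */

typedef uintptr_t W_;                   /* one machine word        */

typedef struct {                        /* toy block descriptor    */
    W_ *start;                          /* first word of the group */
    W_ *free;                           /* next unused word        */
    unsigned blocks;                    /* blocks in the group     */
} bdescr_t;

int main(void)
{
    /* a fake 8-block group with 5 blocks' worth of it already in use */
    unsigned total_blocks = 8;
    W_ *mem = malloc((size_t)total_blocks * BLOCK_SIZE_W * sizeof(W_));
    if (mem == NULL) return 1;

    bdescr_t bd = { mem, mem + 5 * BLOCK_SIZE_W, total_blocks };

    /* split 2 blocks off the front; the remainder becomes new_bd */
    unsigned keep = 2;
    bdescr_t new_bd = { bd.start + keep * BLOCK_SIZE_W,
                        bd.free,              /* new_bd->free = bd->free */
                        total_blocks - keep };
    bd.blocks = keep;

    /* the invariant added by the commit */
    assert(new_bd.free <= new_bd.start + new_bd.blocks * BLOCK_SIZE_W);

    printf("new_bd: %u blocks, %td words already used\n",
           new_bd.blocks, new_bd.free - new_bd.start);
    free(mem);
    return 0;
}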