Commit aa779e09 authored by Simon Marlow's avatar Simon Marlow

Don't move Capabilities in setNumCapabilities (#8209)

We have various problems with reallocating the array of Capabilities,
due to threads in waitForReturnCapability that are already holding a
pointer to a Capability.

Rather than add more locking to make this safer, I decided it would be
easier to ensure that we never move the Capabilities at all.  The
capabilities array is now an array of pointers to Capability.  There
are extra indirections, but it rarely matters - we don't often access
Capabilities via the array, normally we already have a pointer to
one.  I ran the parallel benchmarks and didn't see any difference.
parent 5a3918fe
......@@ -35,7 +35,13 @@ Capability MainCapability;
nat n_capabilities = 0;
nat enabled_capabilities = 0;
Capability *capabilities = NULL;
// The array of Capabilities. It's important that when we need
// to allocate more Capabilities we don't have to move the existing
// Capabilities, because there may be pointers to them in use
// (e.g. threads in waitForReturnCapability(), see #8209), so this is
// an array of Capability* rather than an array of Capability.
Capability **capabilities = NULL;
// Holds the Capability which last became free. This is used so that
// an in-call has a chance of quickly finding a free Capability.
......@@ -126,7 +132,7 @@ findSpark (Capability *cap)
/* visit cap.s 0..n-1 in sequence until a theft succeeds. We could
start at a random place instead of 0 as well. */
for ( i=0 ; i < n_capabilities ; i++ ) {
robbed = &capabilities[i];
robbed = capabilities[i];
if (cap == robbed) // ourselves...
continue;
......@@ -169,7 +175,7 @@ anySparks (void)
nat i;
for (i=0; i < n_capabilities; i++) {
if (!emptySparkPoolCap(&capabilities[i])) {
if (!emptySparkPoolCap(capabilities[i])) {
return rtsTrue;
}
}
......@@ -323,7 +329,8 @@ initCapabilities( void )
#else /* !THREADED_RTS */
n_capabilities = 1;
capabilities = &MainCapability;
capabilities = stgMallocBytes(sizeof(Capability*), "initCapabilities");
capabilities[0] = &MainCapability;
initCapability(&MainCapability, 0);
#endif
......@@ -333,46 +340,40 @@ initCapabilities( void )
// There are no free capabilities to begin with. We will start
// a worker Task to each Capability, which will quickly put the
// Capability on the free list when it finds nothing to do.
last_free_capability = &capabilities[0];
last_free_capability = capabilities[0];
}
Capability *
void
moreCapabilities (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
{
#if defined(THREADED_RTS)
nat i;
Capability *old_capabilities = capabilities;
Capability **old_capabilities = capabilities;
capabilities = stgMallocBytes(to * sizeof(Capability*), "moreCapabilities");
if (to == 1) {
// THREADED_RTS must work on builds that don't have a mutable
// BaseReg (eg. unregisterised), so in this case
// capabilities[0] must coincide with &MainCapability.
capabilities = &MainCapability;
} else {
capabilities = stgMallocBytes(to * sizeof(Capability),
"moreCapabilities");
if (from > 0) {
memcpy(capabilities, old_capabilities, from * sizeof(Capability));
}
capabilities[0] = &MainCapability;
}
for (i = from; i < to; i++) {
initCapability(&capabilities[i], i);
for (i = 0; i < to; i++) {
if (i < from) {
capabilities[i] = old_capabilities[i];
} else {
capabilities[i] = stgMallocBytes(sizeof(Capability),
"moreCapabilities");
initCapability(capabilities[i], i);
}
}
last_free_capability = &capabilities[0];
debugTrace(DEBUG_sched, "allocated %d more capabilities", to - from);
// Return the old array to free later.
if (from > 1) {
return old_capabilities;
} else {
return NULL;
if (old_capabilities != NULL) {
stgFree(old_capabilities);
}
#else
return NULL;
#endif
}
......@@ -385,7 +386,7 @@ void contextSwitchAllCapabilities(void)
{
nat i;
for (i=0; i < n_capabilities; i++) {
contextSwitchCapability(&capabilities[i]);
contextSwitchCapability(capabilities[i]);
}
}
......@@ -393,7 +394,7 @@ void interruptAllCapabilities(void)
{
nat i;
for (i=0; i < n_capabilities; i++) {
interruptCapability(&capabilities[i]);
interruptCapability(capabilities[i]);
}
}
......@@ -606,8 +607,8 @@ waitForReturnCapability (Capability **pCap, Task *task)
// otherwise, search for a free capability
cap = NULL;
for (i = 0; i < n_capabilities; i++) {
if (!capabilities[i].running_task) {
cap = &capabilities[i];
if (!capabilities[i]->running_task) {
cap = capabilities[i];
break;
}
}
......@@ -955,7 +956,7 @@ shutdownCapabilities(Task *task, rtsBool safe)
nat i;
for (i=0; i < n_capabilities; i++) {
ASSERT(task->incall->tso == NULL);
shutdownCapability(&capabilities[i], task, safe);
shutdownCapability(capabilities[i], task, safe);
}
#if defined(THREADED_RTS)
ASSERT(checkSparkCountInvariant());
......@@ -981,11 +982,13 @@ freeCapabilities (void)
#if defined(THREADED_RTS)
nat i;
for (i=0; i < n_capabilities; i++) {
freeCapability(&capabilities[i]);
freeCapability(capabilities[i]);
stgFree(capabilities[i]);
}
#else
freeCapability(&MainCapability);
#endif
stgFree(capabilities);
traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);
traceCapsetDelete(CAPSET_CLOCKDOMAIN_DEFAULT);
}
......@@ -1032,7 +1035,7 @@ markCapabilities (evac_fn evac, void *user)
{
nat n;
for (n = 0; n < n_capabilities; n++) {
markCapability(evac, user, &capabilities[n], rtsFalse);
markCapability(evac, user, capabilities[n], rtsFalse);
}
}
......@@ -1044,13 +1047,13 @@ rtsBool checkSparkCountInvariant (void)
nat i;
for (i = 0; i < n_capabilities; i++) {
sparks.created += capabilities[i].spark_stats.created;
sparks.dud += capabilities[i].spark_stats.dud;
sparks.overflowed+= capabilities[i].spark_stats.overflowed;
sparks.converted += capabilities[i].spark_stats.converted;
sparks.gcd += capabilities[i].spark_stats.gcd;
sparks.fizzled += capabilities[i].spark_stats.fizzled;
remaining += sparkPoolSize(capabilities[i].sparks);
sparks.created += capabilities[i]->spark_stats.created;
sparks.dud += capabilities[i]->spark_stats.dud;
sparks.overflowed+= capabilities[i]->spark_stats.overflowed;
sparks.converted += capabilities[i]->spark_stats.converted;
sparks.gcd += capabilities[i]->spark_stats.gcd;
sparks.fizzled += capabilities[i]->spark_stats.fizzled;
remaining += sparkPoolSize(capabilities[i]->sparks);
}
/* The invariant is
......
......@@ -132,8 +132,8 @@ struct Capability_ {
StgTRecHeader *free_trec_headers;
nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
// Capabilities are stored in an array, so make sure that adjacent
// Capabilities don't share any cache-lines:
// We never want a Capability to overlap a cache line with anything
// else, so round it up to a cache line size:
#ifndef mingw32_HOST_OS
ATTRIBUTE_ALIGNED(64)
#endif
......@@ -181,7 +181,7 @@ void initCapabilities (void);
// Add and initialise more Capabilities
//
Capability * moreCapabilities (nat from, nat to);
void moreCapabilities (nat from, nat to);
// Release a capability. This is called by a Task that is exiting
// Haskell to make a foreign call, or in various other cases when we
......@@ -211,7 +211,7 @@ INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
// Array of all the capabilities
//
extern Capability *capabilities;
extern Capability **capabilities;
// The Capability that was last free. Used as a good guess for where
// to assign new threads.
......
......@@ -149,7 +149,7 @@ initProfiling1 (void)
{
nat n;
for (n=0; n < n_capabilities; n++) {
capabilities[n].r.rCCCS = CCS_SYSTEM;
capabilities[n]->r.rCCCS = CCS_SYSTEM;
}
}
......
......@@ -76,7 +76,7 @@ handleProfTick(void)
if (do_prof_ticks) {
nat n;
for (n=0; n < n_capabilities; n++) {
capabilities[n].r.rCCCS->time_ticks++;
capabilities[n]->r.rCCCS->time_ticks++;
}
}
#endif
......
......@@ -1789,7 +1789,7 @@ computeRetainerSet( void )
// because we can find MUT_VAR objects which have not been
// visited during retainer profiling.
for (n = 0; n < n_capabilities; n++) {
for (bd = capabilities[n].mut_lists[g]; bd != NULL; bd = bd->link) {
for (bd = capabilities[n]->mut_lists[g]; bd != NULL; bd = bd->link) {
for (ml = bd->start; ml < bd->free; ml++) {
maybeInitRetainerSet((StgClosure *)*ml);
......
This diff is collapsed.
......@@ -346,10 +346,10 @@ calcTotalAllocated(void)
W_ tot_alloc = 0;
W_ n;
for (n = 0; n < n_capabilities; n++) {
tot_alloc += capabilities[n].total_allocated;
traceEventHeapAllocated(&capabilities[n],
tot_alloc += capabilities[n]->total_allocated;
traceEventHeapAllocated(capabilities[n],
CAPSET_HEAP_DEFAULT,
capabilities[n].total_allocated * sizeof(W_));
capabilities[n]->total_allocated * sizeof(W_));
}
return tot_alloc;
......@@ -730,12 +730,12 @@ stat_exit (void)
nat i;
SparkCounters sparks = { 0, 0, 0, 0, 0, 0};
for (i = 0; i < n_capabilities; i++) {
sparks.created += capabilities[i].spark_stats.created;
sparks.dud += capabilities[i].spark_stats.dud;
sparks.overflowed+= capabilities[i].spark_stats.overflowed;
sparks.converted += capabilities[i].spark_stats.converted;
sparks.gcd += capabilities[i].spark_stats.gcd;
sparks.fizzled += capabilities[i].spark_stats.fizzled;
sparks.created += capabilities[i]->spark_stats.created;
sparks.dud += capabilities[i]->spark_stats.dud;
sparks.overflowed+= capabilities[i]->spark_stats.overflowed;
sparks.converted += capabilities[i]->spark_stats.converted;
sparks.gcd += capabilities[i]->spark_stats.gcd;
sparks.fizzled += capabilities[i]->spark_stats.fizzled;
}
statsPrintf(" SPARKS: %" FMT_Word " (%" FMT_Word " converted, %" FMT_Word " overflowed, %" FMT_Word " dud, %" FMT_Word " GC'd, %" FMT_Word " fizzled)\n\n",
......@@ -900,10 +900,10 @@ statDescribeGens(void)
mut = 0;
for (i = 0; i < n_capabilities; i++) {
mut += countOccupied(capabilities[i].mut_lists[g]);
mut += countOccupied(capabilities[i]->mut_lists[g]);
// Add the pinned object block.
bd = capabilities[i].pinned_object_block;
bd = capabilities[i]->pinned_object_block;
if (bd != NULL) {
gen_live += bd->free - bd->start;
gen_blocks += bd->blocks;
......@@ -999,12 +999,12 @@ extern void getSparkStats( SparkCounters *s ) {
s->gcd = 0;
s->fizzled = 0;
for (i = 0; i < n_capabilities; i++) {
s->created += capabilities[i].spark_stats.created;
s->dud += capabilities[i].spark_stats.dud;
s->overflowed+= capabilities[i].spark_stats.overflowed;
s->converted += capabilities[i].spark_stats.converted;
s->gcd += capabilities[i].spark_stats.gcd;
s->fizzled += capabilities[i].spark_stats.fizzled;
s->created += capabilities[i]->spark_stats.created;
s->dud += capabilities[i]->spark_stats.dud;
s->overflowed+= capabilities[i]->spark_stats.overflowed;
s->converted += capabilities[i]->spark_stats.converted;
s->gcd += capabilities[i]->spark_stats.gcd;
s->fizzled += capabilities[i]->spark_stats.fizzled;
}
}
#endif
......
......@@ -326,34 +326,6 @@ discardTasksExcept (Task *keep)
RELEASE_LOCK(&all_tasks_mutex);
}
//
// After the capabilities[] array has moved, we have to adjust all
// (Capability *) pointers to point to the new array. The old array
// is still valid at this point.
//
void updateCapabilityRefs (void)
{
Task *task;
InCall *incall;
ACQUIRE_LOCK(&all_tasks_mutex);
for (task = all_tasks; task != NULL; task=task->all_next) {
if (task->cap != NULL) {
task->cap = &capabilities[task->cap->no];
}
for (incall = task->incall; incall != NULL; incall = incall->prev_stack) {
if (incall->suspended_cap != NULL) {
incall->suspended_cap = &capabilities[incall->suspended_cap->no];
}
}
}
RELEASE_LOCK(&all_tasks_mutex);
}
#if defined(THREADED_RTS)
void
......
......@@ -213,11 +213,6 @@ void interruptWorkerTask (Task *task);
#endif /* THREADED_RTS */
// Update any (Capability *) pointers belonging to Tasks after the
// Capability array is moved/resized.
//
void updateCapabilityRefs (void);
// For stats
extern nat taskCount;
extern nat workerCount;
......
......@@ -801,7 +801,7 @@ printAllThreads(void)
debugBelch("all threads:\n");
for (i = 0; i < n_capabilities; i++) {
cap = &capabilities[i];
cap = capabilities[i];
debugBelch("threads on capability %d:\n", cap->no);
for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
......
......@@ -933,7 +933,7 @@ compact(StgClosure *static_objects)
bdescr *bd;
StgPtr p;
for (n = 0; n < n_capabilities; n++) {
for (bd = capabilities[n].mut_lists[g];
for (bd = capabilities[n]->mut_lists[g];
bd != NULL; bd = bd->link) {
for (p = bd->start; p < bd->free; p++) {
thread((StgClosure **)p);
......
......@@ -236,8 +236,8 @@ GarbageCollect (nat collect_gen,
// attribute any costs to CCS_GC
#ifdef PROFILING
for (n = 0; n < n_capabilities; n++) {
save_CCS[n] = capabilities[n].r.rCCCS;
capabilities[n].r.rCCCS = CCS_GC;
save_CCS[n] = capabilities[n]->r.rCCCS;
capabilities[n]->r.rCCCS = CCS_GC;
}
#endif
......@@ -339,18 +339,18 @@ GarbageCollect (nat collect_gen,
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
#if defined(THREADED_RTS)
scavenge_capability_mut_Lists1(&capabilities[n]);
scavenge_capability_mut_Lists1(capabilities[n]);
#else
scavenge_capability_mut_lists(&capabilities[n]);
scavenge_capability_mut_lists(capabilities[n]);
#endif
}
} else {
scavenge_capability_mut_lists(gct->cap);
for (n = 0; n < n_capabilities; n++) {
if (gc_threads[n]->idle) {
markCapability(mark_root, gct, &capabilities[n],
markCapability(mark_root, gct, capabilities[n],
rtsTrue/*don't mark sparks*/);
scavenge_capability_mut_lists(&capabilities[n]);
scavenge_capability_mut_lists(capabilities[n]);
}
}
}
......@@ -363,7 +363,7 @@ GarbageCollect (nat collect_gen,
gct->evac_gen_no = 0;
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
markCapability(mark_root, gct, &capabilities[n],
markCapability(mark_root, gct, capabilities[n],
rtsTrue/*don't mark sparks*/);
}
} else {
......@@ -417,12 +417,12 @@ GarbageCollect (nat collect_gen,
#ifdef THREADED_RTS
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
pruneSparkQueue(&capabilities[n]);
pruneSparkQueue(capabilities[n]);
}
} else {
for (n = 0; n < n_capabilities; n++) {
if (n == cap->no || gc_threads[n]->idle) {
pruneSparkQueue(&capabilities[n]);
pruneSparkQueue(capabilities[n]);
}
}
}
......@@ -495,7 +495,7 @@ GarbageCollect (nat collect_gen,
if (g > 0) {
W_ mut_list_size = 0;
for (n = 0; n < n_capabilities; n++) {
mut_list_size += countOccupied(capabilities[n].mut_lists[g]);
mut_list_size += countOccupied(capabilities[n]->mut_lists[g]);
}
copied += mut_list_size;
......@@ -646,14 +646,14 @@ GarbageCollect (nat collect_gen,
// Reset the nursery: make the blocks empty
if (DEBUG_IS_ON || n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
clearNursery(&capabilities[n]);
clearNursery(capabilities[n]);
}
} else {
// When doing parallel GC, clearNursery() is called by the
// worker threads
for (n = 0; n < n_capabilities; n++) {
if (gc_threads[n]->idle) {
clearNursery(&capabilities[n]);
clearNursery(capabilities[n]);
}
}
}
......@@ -753,7 +753,7 @@ GarbageCollect (nat collect_gen,
// restore enclosing cost centre
#ifdef PROFILING
for (n = 0; n < n_capabilities; n++) {
capabilities[n].r.rCCCS = save_CCS[n];
capabilities[n]->r.rCCCS = save_CCS[n];
}
#endif
......@@ -794,7 +794,7 @@ new_gc_thread (nat n, gc_thread *t)
nat g;
gen_workspace *ws;
t->cap = &capabilities[n];
t->cap = capabilities[n];
#ifdef THREADED_RTS
t->id = 0;
......@@ -866,12 +866,6 @@ initGcThreads (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
"initGcThreads");
}
// We have to update the gct->cap pointers to point to the new
// Capability array now.
for (i = 0; i < from; i++) {
gc_threads[i]->cap = &capabilities[gc_threads[i]->cap->no];
}
for (i = from; i < to; i++) {
gc_threads[i] =
stgMallocBytes(sizeof(gc_thread) +
......@@ -1124,7 +1118,7 @@ waitForGcThreads (Capability *cap USED_IF_THREADS)
for (i=0; i < n_threads; i++) {
if (i == me || gc_threads[i]->idle) continue;
if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
prodCapability(&capabilities[i], cap->running_task);
prodCapability(capabilities[i], cap->running_task);
}
}
for (j=0; j < 10; j++) {
......@@ -1132,7 +1126,7 @@ waitForGcThreads (Capability *cap USED_IF_THREADS)
for (i=0; i < n_threads; i++) {
if (i == me || gc_threads[i]->idle) continue;
write_barrier();
interruptCapability(&capabilities[i]);
interruptCapability(capabilities[i]);
if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
retry = rtsTrue;
}
......@@ -1228,8 +1222,8 @@ prepare_collected_gen (generation *gen)
g = gen->no;
if (g != 0) {
for (i = 0; i < n_capabilities; i++) {
freeChain(capabilities[i].mut_lists[g]);
capabilities[i].mut_lists[g] = allocBlock();
freeChain(capabilities[i]->mut_lists[g]);
capabilities[i]->mut_lists[g] = allocBlock();
}
}
......@@ -1360,7 +1354,7 @@ prepare_uncollected_gen (generation *gen)
// allocate a fresh block for each one. We'll traverse these
// mutable lists as roots early on in the GC.
for (i = 0; i < n_capabilities; i++) {
stash_mut_list(&capabilities[i], gen->no);
stash_mut_list(capabilities[i], gen->no);
}
ASSERT(gen->scavenged_large_objects == NULL);
......@@ -1429,7 +1423,7 @@ collect_pinned_object_blocks (void)
for (n = 0; n < n_capabilities; n++) {
prev = NULL;
for (bd = capabilities[n].pinned_object_blocks; bd != NULL; bd = bd->link) {
for (bd = capabilities[n]->pinned_object_blocks; bd != NULL; bd = bd->link) {
prev = bd;
}
if (prev != NULL) {
......@@ -1437,8 +1431,8 @@ collect_pinned_object_blocks (void)
if (g0->large_objects != NULL) {
g0->large_objects->u.back = prev;
}
g0->large_objects = capabilities[n].pinned_object_blocks;
capabilities[n].pinned_object_blocks = 0;
g0->large_objects = capabilities[n]->pinned_object_blocks;
capabilities[n]->pinned_object_blocks = 0;
}
}
}
......
......@@ -615,7 +615,7 @@ checkLocalMutableLists (nat cap_no)
{
nat g;
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
checkMutableList(capabilities[cap_no].mut_lists[g], g);
checkMutableList(capabilities[cap_no]->mut_lists[g], g);
}
}
......@@ -756,7 +756,7 @@ findMemoryLeak (void)
nat g, i;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (i = 0; i < n_capabilities; i++) {
markBlocks(capabilities[i].mut_lists[g]);
markBlocks(capabilities[i]->mut_lists[g]);
markBlocks(gc_threads[i]->gens[g].part_list);
markBlocks(gc_threads[i]->gens[g].scavd_list);
markBlocks(gc_threads[i]->gens[g].todo_bd);
......@@ -767,7 +767,7 @@ findMemoryLeak (void)
for (i = 0; i < n_capabilities; i++) {
markBlocks(nurseries[i].blocks);
markBlocks(capabilities[i].pinned_object_block);
markBlocks(capabilities[i]->pinned_object_block);
}
#ifdef PROFILING
......@@ -847,7 +847,7 @@ memInventory (rtsBool show)
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen_blocks[g] = 0;
for (i = 0; i < n_capabilities; i++) {
gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
......@@ -859,10 +859,10 @@ memInventory (rtsBool show)
for (i = 0; i < n_capabilities; i++) {
ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
nursery_blocks += nurseries[i].n_blocks;
if (capabilities[i].pinned_object_block != NULL) {
nursery_blocks += capabilities[i].pinned_object_block->blocks;
if (capabilities[i]->pinned_object_block != NULL) {
nursery_blocks += capabilities[i]->pinned_object_block->blocks;
}
nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
}
retainer_blocks = 0;
......
......@@ -215,7 +215,7 @@ void storageAddCapabilities (nat from, nat to)
// we've moved the nurseries, so we have to update the rNursery
// pointers from the Capabilities.
for (i = 0; i < to; i++) {
capabilities[i].r.rNursery = &nurseries[i];
capabilities[i]->r.rNursery = &nurseries[i];
}
/* The allocation area. Policy: keep the allocation area
......@@ -229,7 +229,7 @@ void storageAddCapabilities (nat from, nat to)
// allocate a block for each mut list
for (n = from; n < to; n++) {
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
capabilities[n].mut_lists[g] = allocBlock();
capabilities[n]->mut_lists[g] = allocBlock();
}
}
......@@ -493,8 +493,8 @@ assignNurseriesToCapabilities (nat from, nat to)
nat i;
for (i = from; i < to; i++) {
capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
capabilities[i].r.rCurrentAlloc = NULL;
capabilities[i]->r.rCurrentNursery = nurseries[i].blocks;
capabilities[i]->r.rCurrentAlloc = NULL;
}
}
......@@ -939,7 +939,7 @@ void updateNurseriesStats (void)
nat i;
for (i = 0; i < n_capabilities; i++) {
capabilities[i].total_allocated += countOccupied(nurseries[i].blocks);
capabilities[i]->total_allocated += countOccupied(nurseries[i].blocks);
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment